Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: findPeersW...topic-bug- (44 commits)
Commits (SHA1): 4cc1f61700, 0ea5e2cf9d, 29fe707143, d68196822b, 924fe4de98, fe9dd255c7, 83d75bcb78, ba5f7361ad, 8fa956036a, 58ce1c25f5, 98532a2df3, 08be6fde92, 4585cdc932, aa47435c91, 80eba4e6dd, 606294e17f, 977e923692, 013b6b1d60, 66ff6f70b8, 5b1a9fb077, 35bc9b1a0f, 02dca85251, 39b2163702, d67ee62efa, 9f9401e615, 6b89d839f6, d826a3c7fe, 900f162467, 5266d34a22, c941e47b7e, 54991bbc52, 7e32bbc199, a1be3d68cd, 88af3f90a5, 600169a53b, a5e4fccb47, e589588f47, 41884d8d9d, db074cbf12, 360e89767f, 238d5c07df, 2292d955a3, 76bc30e8ba, a5c7c6da06
.github/actions/gomodtidy/Dockerfile (2 changes, vendored)

@@ -1,4 +1,4 @@
-FROM golang:1.24-alpine
+FROM golang:1.25.1-alpine
 
 COPY entrypoint.sh /entrypoint.sh
 
.github/workflows/fuzz.yml (4 changes, vendored)

@@ -16,7 +16,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - id: list
         uses: shogo82148/actions-go-fuzz/list@v0
         with:
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - uses: shogo82148/actions-go-fuzz/run@v0
         with:
          packages: ${{ matrix.package }}
.github/workflows/go.yml (22 changes, vendored)

@@ -31,7 +31,7 @@ jobs:
       - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
@@ -44,27 +44,27 @@
     steps:
       - name: Checkout
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0

-      - name: Set up Go 1.24
-        uses: actions/setup-go@v4
+      - name: Set up Go 1.25.1
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.24.0'
-        id: go
+          go-version: '1.25.1'

       - name: Golangci-lint
-        uses: golangci/golangci-lint-action@v5
+        uses: golangci/golangci-lint-action@v8
         with:
-          version: v1.64.5
-          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
+          version: v2.4

   build:
     name: Build
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.x
+      - name: Set up Go 1.25.1
         uses: actions/setup-go@v4
         with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
         id: go

       - name: Check out code into the Go module directory
.golangci.yml (121 changes)

@@ -1,90 +1,41 @@
+version: "2"
 run:
   timeout: 10m
-  go: '1.23.5'
-
-issues:
-  exclude-files:
-    - validator/web/site_data.go
-    - .*_test.go
-  exclude-dirs:
-    - proto
-    - tools/analyzers
-
+  go: 1.23.5
 linters:
-  enable-all: true
-  disable:
-    # Deprecated linters:
-    # Disabled for now:
-    - asasalint
-    - bodyclose
-    - containedctx
-    - contextcheck
-    - cyclop
-    - depguard
-    - dogsled
-    - dupl
-    - durationcheck
-    - errname
-    - err113
-    - exhaustive
-    - exhaustruct
-    - forbidigo
-    - forcetypeassert
-    - funlen
-    - gci
-    - gochecknoglobals
-    - gochecknoinits
-    - goconst
-    - gocritic
-    - gocyclo
-    - godot
-    - godox
-    - gofumpt
-    - gomoddirectives
-    - gosec
-    - inamedparam
-    - interfacebloat
-    - intrange
-    - ireturn
-    - lll
-    - maintidx
-    - makezero
-    - mnd
-    - musttag
-    - nakedret
-    - nestif
-    - nilnil
-    - nlreturn
-    - noctx
-    - nolintlint
-    - nonamedreturns
-    - nosprintfhostport
-    - perfsprint
-    - prealloc
-    - predeclared
-    - promlinter
-    - protogetter
-    - recvcheck
-    - revive
-    - spancheck
-    - staticcheck
-    - stylecheck
-    - tagalign
-    - tagliatelle
-    - thelper
-    - unparam
-    - usetesting
-    - varnamelen
-    - wrapcheck
-    - wsl
-
-linters-settings:
-  gocognit:
-    # TODO: We should target for < 50
-    min-complexity: 65
-
-output:
-  print-issued-lines: true
-  sort-results: true
+  enable:
+    - errcheck
+    - ineffassign
+    - govet
+  disable:
+    - unused
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
WORKSPACE (58 changes)

@@ -158,15 +158,15 @@ oci_register_toolchains(

 http_archive(
     name = "io_bazel_rules_go",
-    integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
+    sha256 = "a729c8ed2447c90fe140077689079ca0acfb7580ec41637f312d650ce9d93d96",
     patch_args = ["-p1"],
     patches = [
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
     urls = [
-        "https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
+        "https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
+        "https://github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
     ],
 )

@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.24.6",
+    go_version = "1.25.1",
     nogo = "@//:nogo",
 )

@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.6.0-alpha.6"
+consensus_spec_version = "v1.6.0-beta.0"

 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
-        "minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
-        "mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
+        "general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
+        "minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
+        "mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
     },
     version = consensus_spec_version,
 )

@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
+    integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )

@@ -300,22 +300,6 @@ filegroup(
     url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
 )

-http_archive(
-    name = "eth2_networks",
-    build_file_content = """
-filegroup(
-    name = "configs",
-    srcs = glob([
-        "shared/**/config.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
-    strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
-    url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
-)
-
 http_archive(
     name = "holesky_testnet",
     build_file_content = """
@@ -327,9 +311,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
-    strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
-    url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz", # 2025-03-17
+    integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
+    strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
+    url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
 )

 http_archive(
@@ -359,9 +343,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
-    strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
-    url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz", # 2025-03-17
+    integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
+    strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
+    url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
 )

 http_archive(
@@ -375,17 +359,17 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
-    strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
-    url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
+    integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
+    strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
+    url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
 )

 http_archive(
     name = "com_google_protobuf",
-    sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
-    strip_prefix = "protobuf-25.1",
+    sha256 = "7c3ebd7aaedd86fa5dc479a0fda803f602caaf78d8aff7ce83b89e1b8ae7442a",
+    strip_prefix = "protobuf-28.3",
     urls = [
-        "https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz",
+        "https://github.com/protocolbuffers/protobuf/archive/v28.3.tar.gz",
     ],
 )
@@ -30,10 +30,11 @@ import (
 )

 const (
-	getExecHeaderPath          = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
-	getStatus                  = "/eth/v1/builder/status"
-	postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
-	postRegisterValidatorPath  = "/eth/v1/builder/validators"
+	getExecHeaderPath            = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
+	getStatus                    = "/eth/v1/builder/status"
+	postBlindedBeaconBlockPath   = "/eth/v1/builder/blinded_blocks"
+	postBlindedBeaconBlockV2Path = "/eth/v2/builder/blinded_blocks"
+	postRegisterValidatorPath    = "/eth/v1/builder/validators"
 )

 var (
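The `{{.Slot}}`-style placeholders in getExecHeaderPath look like Go text/template syntax, so the path is presumably rendered per request from the slot, parent hash, and pubkey. A minimal, self-contained sketch of that rendering (the field values below are made up for illustration):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Same shape as getExecHeaderPath above; rendered with request data.
	const getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
	tpl := template.Must(template.New("execHeader").Parse(getExecHeaderPath))
	// Illustrative values only.
	data := struct{ Slot, ParentHash, Pubkey string }{"12345", "0xparent", "0xpubkey"}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Output: /eth/v1/builder/header/12345/0xparent/0xpubkey
}
```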
@@ -512,7 +513,7 @@ func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.R
 	}

 	// Post the blinded block - the response should only contain a status code (no payload)
-	_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
+	_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockV2Path, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
 	if err != nil {
 		return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
 	}
@@ -1561,7 +1561,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
 	t.Run("success", func(t *testing.T) {
 		hc := &http.Client{
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
 				require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
 				require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
 				require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
@@ -1586,7 +1586,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
 	t.Run("success_ssz", func(t *testing.T) {
 		hc := &http.Client{
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
 				require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
 				require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
 				require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
@@ -1612,7 +1612,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
 	t.Run("error_response", func(t *testing.T) {
 		hc := &http.Client{
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-				require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
 				require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
 				message := ErrorMessage{
 					Code: 400,
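These tests stub the HTTP layer with a `roundtrip` transport whose definition is not part of this diff. A conventional function-adapter implementation would look like the following self-contained sketch (the definition is assumed, not taken from the diff):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// roundtrip adapts a plain function into an http.RoundTripper, letting a
// test intercept every request without running a live server.
type roundtrip func(*http.Request) (*http.Response, error)

func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) { return fn(r) }

func main() {
	hc := &http.Client{Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
		// A real test would assert on r.URL.Path and headers here, as above.
		return &http.Response{
			StatusCode: http.StatusAccepted,
			Body:       io.NopCloser(strings.NewReader("")),
			Request:    r,
		}, nil
	})}
	resp, err := hc.Post("http://builder.example/eth/v2/builder/blinded_blocks", "application/json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 202
}
```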
@@ -290,3 +290,9 @@ type GetProposerLookaheadResponse struct {
 	Finalized bool     `json:"finalized"`
 	Data      []string `json:"data"` // validator indexes
 }
+
+type GetBlobsResponse struct {
+	ExecutionOptimistic bool     `json:"execution_optimistic"`
+	Finalized           bool     `json:"finalized"`
+	Data                []string `json:"data"` //blobs
+}
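GetBlobsResponse is a plain JSON DTO, so decoding a beacon API response into it is a single json.Unmarshal call. A self-contained sketch (the payload below is fabricated):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the struct added in the diff above.
type GetBlobsResponse struct {
	ExecutionOptimistic bool     `json:"execution_optimistic"`
	Finalized           bool     `json:"finalized"`
	Data                []string `json:"data"` // blobs
}

func main() {
	// Fabricated example payload in the shape the struct expects.
	raw := []byte(`{"execution_optimistic":false,"finalized":true,"data":["0xaa","0xbb"]}`)
	var resp GetBlobsResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Finalized, len(resp.Data)) // true 2
}
```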
@@ -56,3 +56,19 @@ type ForkChoiceNodeExtraData struct {
 	TimeStamp string `json:"timestamp"`
 	Target    string `json:"target"`
 }
+
+type GetDebugDataColumnSidecarsResponse struct {
+	Version             string               `json:"version"`
+	ExecutionOptimistic bool                 `json:"execution_optimistic"`
+	Finalized           bool                 `json:"finalized"`
+	Data                []*DataColumnSidecar `json:"data"`
+}
+
+type DataColumnSidecar struct {
+	Index                        string                   `json:"index"`
+	Column                       []string                 `json:"column"`
+	KzgCommitments               []string                 `json:"kzg_commitments"`
+	KzgProofs                    []string                 `json:"kzg_proofs"`
+	SignedBeaconBlockHeader      *SignedBeaconBlockHeader `json:"signed_block_header"`
+	KzgCommitmentsInclusionProof []string                 `json:"kzg_commitments_inclusion_proof"`
+}
@@ -50,7 +50,6 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
@@ -63,6 +62,7 @@ go_library(
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
         "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
         "//beacon-chain/operations/slashings:go_default_library",
@@ -148,7 +148,6 @@ go_test(
         "//beacon-chain/core/blocks:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
@@ -161,6 +160,7 @@ go_test(
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
         "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/attestations/kv:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
@@ -16,7 +16,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	"github.com/OffchainLabs/prysm/v6/config/params"
-	consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
+	blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -218,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er

 // notifyNewPayload signals execution engine on a new payload.
 // It returns true if the EL has returned VALID for the block
-func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
-	preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
+// stVersion should represent the version of the pre-state; header should also be from the pre-state.
+func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
 	defer span.End()

 	// Execution payload is only supported in Bellatrix and beyond. Pre
 	// merge blocks are never optimistic
-	if blk == nil {
-		return false, errors.New("signed beacon block can't be nil")
-	}
-	if preStateVersion < version.Bellatrix {
+	if stVersion < version.Bellatrix {
 		return true, nil
 	}
-	if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
-		return false, err
-	}
 	body := blk.Block().Body()
-	enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
+	enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
 	if err != nil {
 		return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
 	}
@@ -268,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
 			return false, errors.New("nil execution requests")
 		}
 	}
 	lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
-
-	switch {
-	case err == nil:
+	if err == nil {
 		newPayloadValidNodeCount.Inc()
 		return true, nil
-	case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
+	}
+	logFields := logrus.Fields{
+		"slot":             blk.Block().Slot(),
+		"parentRoot":       fmt.Sprintf("%#x", parentRoot),
+		"root":             fmt.Sprintf("%#x", blk.Root()),
+		"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
+	}
+	if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
 		newPayloadOptimisticNodeCount.Inc()
-		log.WithFields(logrus.Fields{
-			"slot":             blk.Block().Slot(),
-			"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
-		}).Info("Called new payload with optimistic block")
+		log.WithFields(logFields).Info("Called new payload with optimistic block")
 		return false, nil
-	case errors.Is(err, execution.ErrInvalidPayloadStatus):
-		lvh := bytesutil.ToBytes32(lastValidHash)
+	}
+	if errors.Is(err, execution.ErrInvalidPayloadStatus) {
+		log.WithFields(logFields).WithError(err).Error("Invalid payload status")
 		return false, invalidBlock{
 			error:         ErrInvalidPayload,
-			lastValidHash: lvh,
+			lastValidHash: bytesutil.ToBytes32(lastValidHash),
 		}
-	default:
-		return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
 	}
+	log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
+	return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
 }

 // reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
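The restructuring above computes the shared log fields once and dispatches on the error with early returns instead of a switch, so every failure branch logs the same context. A minimal, self-contained illustration of the pattern (the error values and fields here are stand-ins, not Prysm's):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/sirupsen/logrus"
)

var (
	errSyncing = errors.New("payload status ACCEPTED/SYNCING") // stand-in sentinel
	errInvalid = errors.New("payload status INVALID")          // stand-in sentinel
)

// handleNewPayload sketches the shape: nil error returns early, then the
// log fields are built once and reused by every remaining branch.
func handleNewPayload(slot uint64, root [32]byte, err error) (bool, error) {
	if err == nil {
		return true, nil
	}
	logFields := logrus.Fields{
		"slot": slot,
		"root": fmt.Sprintf("%#x", root),
	}
	if errors.Is(err, errSyncing) {
		logrus.WithFields(logFields).Info("Called new payload with optimistic block")
		return false, nil
	}
	if errors.Is(err, errInvalid) {
		logrus.WithFields(logFields).WithError(err).Error("Invalid payload status")
		return false, err
	}
	logrus.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
	return false, err
}

func main() {
	ok, err := handleNewPayload(42, [32]byte{0xaa}, errSyncing)
	fmt.Println(ok, err) // false <nil>
}
```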
@@ -481,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
 	phase0State, _ := util.DeterministicGenesisState(t, 1)
 	altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
-	a := &ethpb.SignedBeaconBlockAltair{
-		Block: &ethpb.BeaconBlockAltair{
-			Body: &ethpb.BeaconBlockBodyAltair{},
-		},
-	}
+	a := util.NewBeaconBlockAltair()
 	altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
 	require.NoError(t, err)
-	blk := &ethpb.SignedBeaconBlockBellatrix{
-		Block: &ethpb.BeaconBlockBellatrix{
-			Slot: 1,
-			Body: &ethpb.BeaconBlockBodyBellatrix{
-				ExecutionPayload: &v1.ExecutionPayload{
-					BlockNumber:   1,
-					ParentHash:    make([]byte, fieldparams.RootLength),
-					FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
-					StateRoot:     make([]byte, fieldparams.RootLength),
-					ReceiptsRoot:  make([]byte, fieldparams.RootLength),
-					LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
-					PrevRandao:    make([]byte, fieldparams.RootLength),
-					ExtraData:     make([]byte, 0),
-					BaseFeePerGas: make([]byte, fieldparams.RootLength),
-					BlockHash:     make([]byte, fieldparams.RootLength),
-					Transactions:  make([][]byte, 0),
-				},
-			},
-		},
-	}
+	blk := util.NewBeaconBlockBellatrix()
+	blk.Block.Slot = 1
+	blk.Block.Body.ExecutionPayload.BlockNumber = 1
 	bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
 	require.NoError(t, err)
 	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
@@ -544,12 +523,6 @@ func Test_NotifyNewPayload(t *testing.T) {
 			blk:            altairBlk,
 			isValidPayload: true,
 		},
-		{
-			name:           "nil beacon block",
-			postState:      bellatrixState,
-			errString:      "signed beacon block can't be nil",
-			isValidPayload: false,
-		},
 		{
 			name:      "new payload with optimistic block",
 			postState: bellatrixState,
@@ -576,15 +549,8 @@ func Test_NotifyNewPayload(t *testing.T) {
 			name:      "altair pre state, happy case",
 			postState: bellatrixState,
 			blk: func() interfaces.ReadOnlySignedBeaconBlock {
-				blk := &ethpb.SignedBeaconBlockBellatrix{
-					Block: &ethpb.BeaconBlockBellatrix{
-						Body: &ethpb.BeaconBlockBodyBellatrix{
-							ExecutionPayload: &v1.ExecutionPayload{
-								ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-							},
-						},
-					},
-				}
+				blk := util.NewBeaconBlockBellatrix()
+				blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
 				b, err := consensusblocks.NewSignedBeaconBlock(blk)
 				require.NoError(t, err)
 				return b
@@ -595,24 +561,7 @@ func Test_NotifyNewPayload(t *testing.T) {
 			name:      "not at merge transition",
 			postState: bellatrixState,
 			blk: func() interfaces.ReadOnlySignedBeaconBlock {
-				blk := &ethpb.SignedBeaconBlockBellatrix{
-					Block: &ethpb.BeaconBlockBellatrix{
-						Body: &ethpb.BeaconBlockBodyBellatrix{
-							ExecutionPayload: &v1.ExecutionPayload{
-								ParentHash:    make([]byte, fieldparams.RootLength),
-								FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
-								StateRoot:     make([]byte, fieldparams.RootLength),
-								ReceiptsRoot:  make([]byte, fieldparams.RootLength),
-								LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
-								PrevRandao:    make([]byte, fieldparams.RootLength),
-								ExtraData:     make([]byte, 0),
-								BaseFeePerGas: make([]byte, fieldparams.RootLength),
-								BlockHash:     make([]byte, fieldparams.RootLength),
-								Transactions:  make([][]byte, 0),
-							},
-						},
-					},
-				}
+				blk := util.NewBeaconBlockBellatrix()
 				b, err := consensusblocks.NewSignedBeaconBlock(blk)
 				require.NoError(t, err)
 				return b
@@ -623,15 +572,8 @@ func Test_NotifyNewPayload(t *testing.T) {
 			name:      "happy case",
 			postState: bellatrixState,
 			blk: func() interfaces.ReadOnlySignedBeaconBlock {
-				blk := &ethpb.SignedBeaconBlockBellatrix{
-					Block: &ethpb.BeaconBlockBellatrix{
-						Body: &ethpb.BeaconBlockBodyBellatrix{
-							ExecutionPayload: &v1.ExecutionPayload{
-								ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-							},
-						},
-					},
-				}
+				blk := util.NewBeaconBlockBellatrix()
+				blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
 				b, err := consensusblocks.NewSignedBeaconBlock(blk)
 				require.NoError(t, err)
 				return b
@@ -642,15 +584,8 @@ func Test_NotifyNewPayload(t *testing.T) {
 			name:      "undefined error from ee",
 			postState: bellatrixState,
 			blk: func() interfaces.ReadOnlySignedBeaconBlock {
-				blk := &ethpb.SignedBeaconBlockBellatrix{
-					Block: &ethpb.BeaconBlockBellatrix{
-						Body: &ethpb.BeaconBlockBodyBellatrix{
-							ExecutionPayload: &v1.ExecutionPayload{
-								ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-							},
-						},
-					},
-				}
+				blk := util.NewBeaconBlockBellatrix()
+				blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
 				b, err := consensusblocks.NewSignedBeaconBlock(blk)
 				require.NoError(t, err)
 				return b
@@ -662,15 +597,8 @@ func Test_NotifyNewPayload(t *testing.T) {
 			name:      "invalid block hash error from ee",
 			postState: bellatrixState,
 			blk: func() interfaces.ReadOnlySignedBeaconBlock {
-				blk := &ethpb.SignedBeaconBlockBellatrix{
-					Block: &ethpb.BeaconBlockBellatrix{
-						Body: &ethpb.BeaconBlockBodyBellatrix{
-							ExecutionPayload: &v1.ExecutionPayload{
-								ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-							},
-						},
-					},
-				}
+				blk := util.NewBeaconBlockBellatrix()
+				blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
 				b, err := consensusblocks.NewSignedBeaconBlock(blk)
 				require.NoError(t, err)
 				return b
@@ -701,7 +629,9 @@
 		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
 		postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
 		require.NoError(t, err)
-		isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
+		rob, err := consensusblocks.NewROBlock(tt.blk)
+		require.NoError(t, err)
+		isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
 		if tt.errString != "" {
 			require.ErrorContains(t, tt.errString, err)
 			if tt.invalidBlock {
@@ -725,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
 	ctx := tr.ctx

 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
-	blk := &ethpb.SignedBeaconBlockBellatrix{
-		Block: &ethpb.BeaconBlockBellatrix{
-			Body: &ethpb.BeaconBlockBodyBellatrix{
-				ExecutionPayload: &v1.ExecutionPayload{
-					ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-				},
-			},
-		},
-	}
+	blk := util.NewBeaconBlockBellatrix()
+	blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
 	bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
 	require.NoError(t, err)
+	rob, err := consensusblocks.NewROBlock(bellatrixBlk)
+	require.NoError(t, err)
 	e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
 	e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
 		Header: gethtypes.Header{
@@ -752,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
 	service.cfg.ExecutionEngineCaller = e
 	postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
 	require.NoError(t, err)
-	validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
+	validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
 	require.NoError(t, err)
 	require.Equal(t, true, validated)
 }
@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
 type Blob [BytesPerBlob]byte

 // BytesPerCell is the number of bytes in a single cell.
-const BytesPerCell = ckzg4844.BytesPerCell
+const (
+	BytesPerCell  = ckzg4844.BytesPerCell
+	BytesPerProof = ckzg4844.BytesPerProof
+)

 // Cell represents a chunk of an encoded Blob.
 type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
 type Commitment [48]byte

 // Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
-type Proof [48]byte
+type Proof [BytesPerProof]byte

 // Bytes48 is a 48-byte array.
 type Bytes48 = ckzg4844.Bytes48
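Sizing Proof from BytesPerProof instead of a literal 48 keeps the type in lockstep with the underlying ckzg4844 constant. A self-contained sketch of the slice-to-array conversion this kind of typed constant enables (the constant's value of 48 is assumed here):

```go
package main

import "fmt"

const BytesPerProof = 48 // stands in for ckzg4844.BytesPerProof

type Proof [BytesPerProof]byte

// proofFromSlice converts an untrusted byte slice into a fixed-size Proof,
// rejecting anything that is not exactly BytesPerProof bytes long.
func proofFromSlice(b []byte) (Proof, error) {
	var p Proof
	if len(b) != BytesPerProof {
		return p, fmt.Errorf("proof must be %d bytes, got %d", BytesPerProof, len(b))
	}
	copy(p[:], b)
	return p, nil
}

func main() {
	p, err := proofFromSlice(make([]byte, BytesPerProof))
	fmt.Println(len(p), err) // 48 <nil>
}
```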
@@ -6,11 +6,11 @@ import (
 	"github.com/OffchainLabs/prysm/v6/async/event"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -35,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
 // WithLCStore for light client store access.
 func WithLCStore() Option {
 	return func(s *Service) error {
-		s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+		s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
 		return nil
 	}
 }
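WithLCStore follows the functional-options pattern visible in the hunk: an Option is a function that mutates the Service during construction and may fail. A minimal, self-contained sketch of the pattern (the names below are generic, not Prysm's):

```go
package main

import "fmt"

type Service struct{ lcStore string }

// Option mutates a Service under construction and may report an error.
type Option func(*Service) error

func WithStore(name string) Option {
	return func(s *Service) error {
		s.lcStore = name
		return nil
	}
}

// New applies each option in order, aborting on the first failure.
func New(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := New(WithStore("light-client"))
	fmt.Println(s.lcStore) // light-client
}
```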
@@ -3,7 +3,6 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"slices"
 	"time"

 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -247,7 +246,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 		args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
 			JustifiedCheckpoint: jCheckpoints[i],
 			FinalizedCheckpoint: fCheckpoints[i]}
-		pendingNodes[len(blks)-i-1] = args
+		pendingNodes[i] = args
 		if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
 			tracing.AnnotateError(span, err)
 			return err
@@ -284,14 +283,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
 		return err
 	}
-	// Insert all nodes but the last one to forkchoice
+	// Insert all nodes to forkchoice
 	if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
 		return errors.Wrap(err, "could not insert batch to forkchoice")
 	}
-	// Insert the last block to forkchoice
-	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
-		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
-	}
 	// Set their optimistic status
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -664,14 +659,14 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(
 	ctx context.Context,
-	root [fieldparams.RootLength]byte,
-	signedBlock interfaces.ReadOnlySignedBeaconBlock,
+	roBlock consensusblocks.ROBlock,
 ) error {
-	block := signedBlock.Block()
+	block := roBlock.Block()
 	if block == nil {
 		return errors.New("invalid nil beacon block")
 	}

+	root := roBlock.Root()
 	blockVersion := block.Version()
 	if blockVersion >= version.Fulu {
 		return s.areDataColumnsAvailable(ctx, root, block)
@@ -691,8 +686,6 @@ func (s *Service) areDataColumnsAvailable(
 	root [fieldparams.RootLength]byte,
 	block interfaces.ReadOnlyBeaconBlock,
 ) error {
-	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-
 	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
 	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
 	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
@@ -726,6 +719,7 @@ func (s *Service) areDataColumnsAvailable(

 	// Compute the sampling size.
 	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
+	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
 	samplingSize := max(samplesPerSlot, custodyGroupCount)

 	// Get the peer info for the node.
@@ -751,14 +745,14 @@ func (s *Service) areDataColumnsAvailable(
 	}

 	// Get a map of data column indices that are not currently available.
-	missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
+	missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
 	if err != nil {
 		return errors.Wrap(err, "missing data columns")
 	}

 	// If there are no missing indices, all data column sidecars are available.
 	// This is the happy path.
-	if len(missingMap) == 0 {
+	if len(missing) == 0 {
 		return nil
 	}

@@ -775,33 +769,17 @@ func (s *Service) areDataColumnsAvailable(
 	// Avoid logging if DA check is called after next slot start.
 	if nextSlot.After(time.Now()) {
 		timer := time.AfterFunc(time.Until(nextSlot), func() {
-			missingMapCount := uint64(len(missingMap))
+			missingCount := uint64(len(missing))

-			if missingMapCount == 0 {
+			if missingCount == 0 {
 				return
 			}

-			var (
-				expected interface{} = "all"
-				missing  interface{} = "all"
-			)
-
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			colMapCount := uint64(len(peerInfo.CustodyColumns))
-
-			if colMapCount < numberOfColumns {
-				expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
-			}
-
-			if missingMapCount < numberOfColumns {
-				missing = uint64MapToSortedSlice(missingMap)
-			}
-
 			log.WithFields(logrus.Fields{
 				"slot":            block.Slot(),
 				"root":            fmt.Sprintf("%#x", root),
-				"columnsExpected": expected,
-				"columnsWaiting":  missing,
+				"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
+				"columnsWaiting":  helpers.SortedPrettySliceFromMap(missing),
 			}).Warning("Data columns still missing at slot end")
 		})
 		defer timer.Stop()
@@ -817,7 +795,7 @@ func (s *Service) areDataColumnsAvailable(

 		for _, index := range idents.Indices {
 			// This is a data column we are expecting.
-			if _, ok := missingMap[index]; ok {
+			if _, ok := missing[index]; ok {
 				storedDataColumnsCount++
 			}

@@ -828,10 +806,10 @@ func (s *Service) areDataColumnsAvailable(
 			}

 			// Remove the index from the missing map.
-			delete(missingMap, index)
+			delete(missing, index)

 			// Return if there is no more missing data columns.
-			if len(missingMap) == 0 {
+			if len(missing) == 0 {
 				return nil
 			}
 		}
@@ -839,10 +817,10 @@ func (s *Service) areDataColumnsAvailable(
 		case <-ctx.Done():
 			var missingIndices interface{} = "all"
 			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			missingIndicesCount := uint64(len(missingMap))
+			missingIndicesCount := uint64(len(missing))

 			if missingIndicesCount < numberOfColumns {
-				missingIndices = uint64MapToSortedSlice(missingMap)
+				missingIndices = helpers.SortedPrettySliceFromMap(missing)
 			}

 			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
@@ -926,16 +904,6 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
 	}
 }

-// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
-func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
-	output := make([]uint64, 0, len(input))
-	for idx := range input {
-		output = append(output, idx)
-	}
-	slices.Sort[[]uint64](output)
-	return output
-}
-
 // lateBlockTasks is called 4 seconds into the slot and performs tasks
 // related to late blocks. It emits a MissedSlot state feed event.
 // It calls FCU and sets the right attributes if we are proposing next slot
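helpers.SortedPrettySliceFromMap, which replaces the deleted uint64MapToSortedSlice here, is not shown in this diff. Its apparent job — producing a deterministic, sorted rendering of map keys for logging — can be sketched generically as follows (the signature and formatting are assumptions, not Prysm's definition):

```go
package main

import (
	"fmt"
	"slices"
)

// sortedKeys returns the keys of a uint64-keyed map in ascending order,
// so that log output is stable across runs despite Go's random map order.
func sortedKeys[V any](m map[uint64]V) []uint64 {
	out := make([]uint64, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	slices.Sort(out)
	return out
}

func main() {
	fmt.Println(sortedKeys(map[uint64]bool{5: true, 1: true, 3: true})) // [1 3 5]
}
```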
@@ -3,14 +3,12 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"strings"
+	"slices"
 	"time"

-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
-
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -133,35 +131,26 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 	})
 }

-// processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
-func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
-	if err := s.processLightClientUpdate(cfg); err != nil {
-		log.WithError(err).Error("Failed to process light client update")
-	}
-	if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-		log.WithError(err).Error("Failed to process light client optimistic update")
-	}
-	if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-		log.WithError(err).Error("Failed to process light client finality update")
-	}
-}
-
-// processLightClientUpdate saves the light client update for this block
-// if it's better than the already saved one, when feature flag is enabled.
-func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
+// processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
+func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
 	attestedRoot := cfg.roblock.Block().ParentRoot()
 	attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
 	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
+		return
 	}
 	if attestedBlock == nil || attestedBlock.IsNil() {
-		return errors.New("attested block is nil")
+		log.Error("processLightClientUpdates: Could not get attested block")
+		return
 	}
 	attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
 	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
+		return
 	}
 	if attestedState == nil || attestedState.IsNil() {
-		return errors.New("attested state is nil")
+		log.Error("processLightClientUpdates: Could not get attested state")
+		return
 	}

 	finalizedRoot := attestedState.FinalizedCheckpoint().Root
@@ -169,98 +158,17 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
 	if err != nil {
 		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
 			log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
-			return nil
+			return
 		}
-		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
+		return
 	}

-	update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
+	err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
 	if err != nil {
-		return errors.Wrapf(err, "could not create light client update")
+		log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
 	}

-	period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
-
-	return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
-}
-
-func (s *Service) processLightClientFinalityUpdate(
-	ctx context.Context,
-	signed interfaces.ReadOnlySignedBeaconBlock,
-	postState state.BeaconState,
-) error {
-	attestedRoot := signed.Block().ParentRoot()
-	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-	}
-	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-	}
-
-	finalizedCheckpoint := attestedState.FinalizedCheckpoint()
-
-	if finalizedCheckpoint == nil {
-		return nil
-	}
-
-	finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
-	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
-	if err != nil {
-		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
-			log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
-			return nil
-		}
-		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
-	}
-
-	newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)
-	if err != nil {
-		return errors.Wrap(err, "could not create light client finality update")
-	}
-
-	if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
-		log.Debug("Skip saving light client finality update: current update is better")
-		return nil
-	}
-
-	s.lcStore.SetLastFinalityUpdate(newUpdate, true)
-
-	return nil
-}
-
-func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
-	postState state.BeaconState) error {
-	attestedRoot := signed.Block().ParentRoot()
-	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-	}
-	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-	}
-
-	newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)
-	if err != nil {
-		if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
-			log.WithError(err).Debug("Skipping processing light client optimistic update")
-			return nil
-		}
-		return errors.Wrap(err, "could not create light client optimistic update")
-	}
-
-	if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
-		log.Debug("Skip saving light client optimistic update: current update is better")
-		return nil
-	}
-
-	s.lcStore.SetLastOptimisticUpdate(newUpdate, true)
-
-	return nil
+	log.Debug("Processed light client updates")
 }

 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
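The consolidation above folds three error-returning helpers into one void function that logs and returns early, delegating the "is this update better" logic to the store's SaveLCData. Light client bookkeeping is a side task that should never fail block processing, which is the usual motivation for this pattern. A minimal, generic illustration (the function and helper names below are invented for the sketch):

```go
package main

import (
	"errors"
	"log"
)

// processSideTask logs failures instead of propagating them, so the
// caller's main path is never interrupted by this optional work.
func processSideTask() {
	data, err := loadData()
	if err != nil {
		log.Printf("side task: could not load data: %v", err)
		return
	}
	if err := save(data); err != nil {
		log.Printf("side task: could not save data: %v", err)
	}
}

func loadData() (string, error) { return "", errors.New("not found") }
func save(string) error         { return nil }

func main() { processSideTask() }
```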
@@ -469,15 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
 	if err != nil {
 		return err
 	}
-	// The first block can have a bogus root since the block is not inserted in forkchoice
-	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
-	if err != nil {
-		return err
-	}
-	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
-		JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
-	root := signed.Block().ParentRoot()
 	// As long as parent node is not in fork choice store, and parent node is in DB.
+	root := roblock.Block().ParentRoot()
 	for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
 		b, err := s.getBlock(ctx, root)
 		if err != nil {
@@ -496,12 +397,13 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
 			FinalizedCheckpoint: fCheckpoint}
 		pendingNodes = append(pendingNodes, args)
 	}
-	if len(pendingNodes) == 1 {
+	if len(pendingNodes) == 0 {
 		return nil
 	}
 	if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
 		return ErrNotDescendantOfFinalized
 	}
+	slices.Reverse(pendingNodes)
 	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
 }
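Both this hunk and the onBlockBatch change above appear to move chain-ordering into one explicit slices.Reverse before InsertChain, instead of reversed indexing while collecting. The walk discovers blocks child-first; reversing yields ancestor-first order. In isolation:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Strings stand in for forkchoice nodes, listed in discovery order
	// (newest block first, walking parent links backwards).
	pending := []string{"child", "parent", "grandparent"}
	slices.Reverse(pending)
	fmt.Println(pending) // [grandparent parent child] — insertion order
}
```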
@@ -12,7 +12,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -24,6 +23,7 @@ import (
 	mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
 	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -2795,6 +2795,11 @@ func TestProcessLightClientUpdate(t *testing.T) {
 	s, tr := minimalTestService(t, WithLCStore())
 	ctx := tr.ctx

+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+	require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
 	for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
 		t.Run(version.String(testVersion), func(t *testing.T) {
 			l := util.NewTestLightClient(t, testVersion)
@@ -2817,6 +2822,8 @@ func TestProcessLightClientUpdate(t *testing.T) {
 			require.NoError(t, err)
 			err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
 			require.NoError(t, err)
+			err = s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, currentBlockRoot)
+			require.NoError(t, err)

 			err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
 			require.NoError(t, err)
@@ -2831,10 +2838,9 @@ func TestProcessLightClientUpdate(t *testing.T) {
 			period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 			t.Run("no old update", func(t *testing.T) {
-				require.NoError(t, s.processLightClientUpdate(cfg))
-
+				s.processLightClientUpdates(cfg)
 				// Check that the light client update is saved
-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2848,12 +2854,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
 				oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 				require.NoError(t, err)

-				err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+				err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
 				require.NoError(t, err)

-				require.NoError(t, s.processLightClientUpdate(cfg))
+				s.processLightClientUpdates(cfg)

-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2877,12 +2883,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
 					SyncCommitteeSignature: make([]byte, 96),
 				})

-				err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+				err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
 				require.NoError(t, err)

-				require.NoError(t, s.processLightClientUpdate(cfg))
+				s.processLightClientUpdates(cfg)

-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				require.DeepEqual(t, oldUpdate, u)
@@ -2952,14 +2958,18 @@ func TestIsDataAvailable(t *testing.T) {
 		params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

 	t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
 		ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -2977,7 +2987,9 @@ func TestIsDataAvailable(t *testing.T) {

 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -2989,7 +3001,9 @@ func TestIsDataAvailable(t *testing.T) {

 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -3037,7 +3051,9 @@ func TestIsDataAvailable(t *testing.T) {
 		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
 		defer cancel()

-		err = service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -3099,7 +3115,9 @@ func TestIsDataAvailable(t *testing.T) {
 		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
 		defer cancel()

-		err = service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -3118,7 +3136,9 @@ func TestIsDataAvailable(t *testing.T) {
 			cancel()
 		}()

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NotNil(t, err)
 	})
 }
@@ -3190,6 +3210,11 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 	s.cfg.P2P = &mockp2p.FakeP2P{}
 	ctx := tr.ctx

+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+	require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
 	testCases := []struct {
 		name       string
 		oldOptions []util.LightClientOption
@@ -3205,7 +3230,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 		{
 			name:          "Same age",
 			oldOptions:    []util.LightClientOption{},
-			newOptions:    []util.LightClientOption{util.WithSupermajority()}, // supermajority does not matter here and is only added to result in two different updates
+			newOptions:    []util.LightClientOption{util.WithSupermajority(0)}, // supermajority does not matter here and is only added to result in two different updates
 			expectReplace: false,
 		},
 		{
@@ -3249,14 +3274,14 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

 		t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
 			s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-			s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+			s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)

 			var oldActualUpdate interfaces.LightClientOptimisticUpdate
 			var err error
 			if tc.oldOptions != nil {
 				// config for old update
 				lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
-				require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
+				s.processLightClientUpdates(cfgOld)

 				oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
 				require.NoError(t, err)
@@ -3270,7 +3295,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

 			// config for new update
 			lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
-			require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
+			s.processLightClientUpdates(cfgNew)

 			newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
 			require.NoError(t, err)
@@ -3311,6 +3336,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
s, tr := minimalTestService(t)
|
||||
s.cfg.P2P = &mockp2p.FakeP2P{}
|
||||
ctx := tr.ctx
|
||||
s.head = &head{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -3389,15 +3415,18 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
|
||||
t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
|
||||
s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
|
||||
|
||||
var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
|
||||
var err error
|
||||
|
||||
if tc.oldOptions != nil {
|
||||
// config for old update
|
||||
lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
|
||||
require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
|
||||
blkRoot, err := lOld.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.head.block = lOld.Block
|
||||
s.head.root = blkRoot
|
||||
s.processLightClientUpdates(cfgOld)
|
||||
|
||||
// check that the old update is saved
|
||||
actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
|
||||
@@ -3408,7 +3437,11 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
|
||||
// config for new update
|
||||
lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
|
||||
require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
|
||||
blkRoot, err := lNew.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.head.block = lNew.Block
|
||||
s.head.root = blkRoot
|
||||
s.processLightClientUpdates(cfgNew)
|
||||
|
||||
// check that the actual old update and the actual new update are different
|
||||
actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -93,7 +92,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

blockCopy, err := block.Copy()
if err != nil {
return err
return errors.Wrap(err, "block copy")
}

preState, err := s.getBlockPreState(ctx, blockCopy.Block())
@@ -104,17 +103,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
currentCheckpoints := s.saveCurrentCheckpoints(preState)
roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
return errors.Wrap(err, "new ro block with root")
}

postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return err
return errors.Wrap(err, "validator execution and consensus")
}

daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
daWaitedTime, err := s.handleDA(ctx, avs, roblock)
if err != nil {
return err
return errors.Wrap(err, "handle da")
}

// Defragment the state before continuing block processing.
@@ -135,10 +134,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.postBlockProcess(args); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
return errors.Wrap(err, "post block process")
}
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
return err
return errors.Wrap(err, "update checkpoints")
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if s.slasherEnabled {
@@ -152,12 +151,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return err
return errors.Wrap(err, "check save hot state db")
}

// We apply the same heuristic to some of our more important caches.
if err := s.handleCaches(); err != nil {
return err
return errors.Wrap(err, "handle caches")
}
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
return nil
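Throughout ReceiveBlock, bare `return err` statements give way to `errors.Wrap(err, "...")` calls. A minimal sketch of why this helps, using github.com/pkg/errors (already imported in this file); the helper names below are hypothetical:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// loadBlock is a hypothetical stand-in for a failing dependency.
func loadBlock() error { return errors.New("db closed") }

func receive() error {
	if err := loadBlock(); err != nil {
		// Wrapping prefixes the message with the failing step, so the
		// top-level log reads "handle da: db closed" instead of "db closed".
		return errors.Wrap(err, "handle da")
	}
	return nil
}

func main() {
	fmt.Println(receive())
}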
@@ -240,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}

func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [fieldparams.RootLength]byte,
avs das.AvailabilityStore,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed = time.Since(start)

if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}
}(time.Now())

if avs == nil {
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
return
}

return
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
} else {
err = s.isDataAvailable(ctx, block)
}

var rob blocks.ROBlock
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return
elapsed := time.Since(start)
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}

err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)

return
return elapsed, err
}

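The hunk above interleaves the removed and added versions of handleDA. For readability, here is a minimal standalone sketch of the new control flow; the types and the metric hook are simplified stand-ins for the real Prysm ones:

package blockchain

import (
	"context"
	"time"
)

// roBlock and availabilityStore are hypothetical simplifications.
type roBlock struct{}

type availabilityStore interface {
	IsDataAvailable(ctx context.Context, slot uint64, b roBlock) error
}

// handleDA times the data-availability check and returns the elapsed
// duration to the caller; the metric is recorded only on success.
func handleDA(ctx context.Context, avs availabilityStore, b roBlock, slot uint64,
	fallback func(context.Context, roBlock) error, observe func(time.Duration)) (time.Duration, error) {
	start := time.Now()
	var err error
	if avs != nil {
		err = avs.IsDataAvailable(ctx, slot, b)
	} else {
		err = fallback(ctx, b) // s.isDataAvailable in the real code
	}
	elapsed := time.Since(start)
	if err == nil {
		observe(elapsed) // dataAvailWaitedTime.Observe in the real code
	}
	return elapsed, err
}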
func (s *Service) reportPostBlockProcessing(
@@ -320,13 +301,28 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
if features.Get().EnableLightClient {
// Save a light client bootstrap for the finalized checkpoint
go func() {
err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
return
}
err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
if err != nil {
log.WithError(err).Error("Could not save light client bootstrap by block root")
} else {
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
}
}()

// Clean up the light client store caches
go func() {
err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not migrate light client store to cold storage")
} else {
log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
}
}()
}
}

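The bootstrap path now fetches the finalized state itself and hands it to SaveLightClientBootstrap. Both tasks run in goroutines with no caller to return to, so failures are logged rather than propagated. A compact sketch of that pattern, with hypothetical interface simplifications:

package blockchain

import (
	"context"
	"log"
)

// Hypothetical simplifications of the real stategen and light-client store.
type stateGen interface {
	StateByRoot(ctx context.Context, root [32]byte) (any, error)
}

type lcStore interface {
	SaveLightClientBootstrap(ctx context.Context, root [32]byte, st any) error
}

// saveBootstrapAsync mirrors the post-finalization pattern above: the work
// runs in a goroutine, and every failure is logged instead of returned,
// since no caller is waiting on the result.
func saveBootstrapAsync(ctx context.Context, sg stateGen, store lcStore, root [32]byte) {
	go func() {
		st, err := sg.StateByRoot(ctx, root)
		if err != nil {
			log.Printf("could not retrieve state for finalized root: %v", err)
			return
		}
		if err := store.SaveLightClientBootstrap(ctx, root, st); err != nil {
			log.Printf("could not save light client bootstrap: %v", err)
		}
	}()
}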
@@ -9,9 +9,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -192,7 +192,9 @@ func TestHandleDA(t *testing.T) {
require.NoError(t, err)

s, _ := minimalTestService(t)
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
require.NoError(t, err)
elapsed, err := s.handleDA(t.Context(), nil, block)
require.NoError(t, err)
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}
@@ -594,11 +596,7 @@ func TestProcessLightClientBootstrap(t *testing.T) {

require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))

sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
require.NoError(t, err)
require.NotNil(t, sss)

s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
s.executePostFinalizationTasks(s.ctx, l.AttestedState)

// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)

@@ -14,13 +14,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"slices"

forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -78,10 +79,7 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain); err != nil {
return errors.Wrap(err, "could not insert forkchoice chain")
}
return s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, chain[0].Block)
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
}

func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
@@ -115,6 +113,7 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
}
}
slices.Reverse(chain)
return chain, nil
}

@@ -11,21 +11,21 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}

@@ -723,7 +723,8 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
}

// ReceiveDataColumns implements the same method in chain service
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
c.DataColumns = append(c.DataColumns, dcs...)
return nil
}

@@ -184,45 +184,54 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
},
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateFulu:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}

func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
}
}

@@ -10,6 +10,7 @@ go_library(
"legacy.go",
"metrics.go",
"randao.go",
"ranges.go",
"rewards_penalties.go",
"shuffle.go",
"sync_committee.go",
@@ -56,6 +57,7 @@ go_test(
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"ranges_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
"sync_committee_test.go",

62 beacon-chain/core/helpers/ranges.go Normal file
@@ -0,0 +1,62 @@
package helpers

import (
"fmt"
"slices"
)

// SortedSliceFromMap takes a map with uint64 keys and returns a sorted slice of the keys.
func SortedSliceFromMap(toSort map[uint64]bool) []uint64 {
slice := make([]uint64, 0, len(toSort))
for key := range toSort {
slice = append(slice, key)
}

slices.Sort(slice)
return slice
}

// PrettySlice returns a pretty string representation of a sorted slice of uint64.
// `sortedSlice` must be sorted in ascending order.
// Example: [1,2,3,5,6,7,8,10] -> "1-3,5-8,10"
func PrettySlice(sortedSlice []uint64) string {
if len(sortedSlice) == 0 {
return ""
}

var result string
start := sortedSlice[0]
end := sortedSlice[0]

for i := 1; i < len(sortedSlice); i++ {
if sortedSlice[i] == end+1 {
end = sortedSlice[i]
continue
}

if start == end {
result += fmt.Sprintf("%d,", start)
start = sortedSlice[i]
end = sortedSlice[i]
continue
}

result += fmt.Sprintf("%d-%d,", start, end)
start = sortedSlice[i]
end = sortedSlice[i]
}

if start == end {
result += fmt.Sprintf("%d", start)
return result
}

result += fmt.Sprintf("%d-%d", start, end)
return result
}

// SortedPrettySliceFromMap combines SortedSliceFromMap and PrettySlice to return a pretty string representation of the keys in a map.
func SortedPrettySliceFromMap(toSort map[uint64]bool) string {
sorted := SortedSliceFromMap(toSort)
return PrettySlice(sorted)
}
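The new helpers compose naturally; a quick usage sketch (the map contents here are made up):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
)

func main() {
	// Custody columns as a set; contiguous runs collapse into ranges.
	columns := map[uint64]bool{1: true, 2: true, 3: true, 5: true, 6: true, 7: true, 8: true, 10: true}
	fmt.Println(helpers.SortedPrettySliceFromMap(columns)) // "1-3,5-8,10"
}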
64 beacon-chain/core/helpers/ranges_test.go Normal file
@@ -0,0 +1,64 @@
package helpers_test

import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestSortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 3: true, 8: true, 1: true}
expected := []uint64{1, 3, 5, 8}

actual := helpers.SortedSliceFromMap(input)
require.Equal(t, len(expected), len(actual))

for i := range expected {
require.Equal(t, expected[i], actual[i])
}
}

func TestPrettySlice(t *testing.T) {
tests := []struct {
name string
input []uint64
expected string
}{
{
name: "empty slice",
input: []uint64{},
expected: "",
},
{
name: "only distinct elements",
input: []uint64{1, 3, 5, 7, 9},
expected: "1,3,5,7,9",
},
{
name: "single range",
input: []uint64{1, 2, 3, 4, 5},
expected: "1-5",
},
{
name: "multiple ranges and distinct elements",
input: []uint64{1, 2, 3, 5, 6, 7, 8, 10, 12, 13, 14},
expected: "1-3,5-8,10,12-14",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := helpers.PrettySlice(tt.input)
require.Equal(t, tt.expected, actual)
})
}
}

func TestSortedPrettySliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 7: true, 8: true, 10: true}
expected := "5,7-8,10"

actual := helpers.SortedPrettySliceFromMap(input)
require.Equal(t, expected, actual)
}
@@ -1,202 +0,0 @@
package light_client

import (
"context"
"sync"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)

var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")

type Store struct {
mu sync.RWMutex

beaconDB iface.HeadAccessDatabase
lastFinalityUpdate interfaces.LightClientFinalityUpdate // tracks the best finality update seen so far
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
p2p p2p.Accessor
stateFeed event.SubscriberSender
}

func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
return &Store{
beaconDB: db,
p2p: p,
stateFeed: e,
}
}

func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
s.mu.RLock()
defer s.mu.RUnlock()

// Fetch the light client bootstrap from the database
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
if err != nil {
return nil, err
}
if bootstrap == nil { // not found
return nil, ErrLightClientBootstrapNotFound
}

return bootstrap, nil
}

func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()

blk, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
}
if blk == nil {
return errors.Errorf("failed to fetch block for root %x", blockRoot)
}

state, err := s.beaconDB.State(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
}
if state == nil {
return errors.Errorf("failed to fetch state for block root %x", blockRoot)
}

bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
if err != nil {
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
}

// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
}
return nil
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()

// Fetch the light client updatesMap from the database
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
if err != nil {
return nil, err
}

var updates []interfaces.LightClientUpdate
for i := startPeriod; i <= endPeriod; i++ {
update, ok := updatesMap[i]
if !ok {
// Only return the first contiguous range of updates
break
}
updates = append(updates, update)
}

return updates, nil
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()

// Fetch the light client update for the given period from the database
update, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return nil, err
}

return update, nil
}

func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
s.mu.Lock()
defer s.mu.Unlock()

oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return errors.Wrapf(err, "could not get current light client update")
}

if oldUpdate == nil {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}

isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
if err != nil {
return errors.Wrapf(err, "could not compare light client updates")
}

if isNewUpdateBetter {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
return nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}

s.lastFinalityUpdate = update
log.Debug("Saved new light client finality update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})
}

func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}

s.lastOptimisticUpdate = update
log.Debug("Saved new light client optimistic update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastOptimisticUpdate
}
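This deleted file reappears under beacon-chain/light-client with a reordered constructor, as the updated call sites earlier in this change set show. Its caching of the last finality and optimistic updates follows a standard RWMutex pattern, sketched generically below (a minimal illustration, not the real Store):

package lightclient

import "sync"

// updateCache sketches the pattern used by the store: writers take the
// exclusive lock, readers take the shared lock, so concurrent readers
// never block each other.
type updateCache[T any] struct {
	mu   sync.RWMutex
	last T
}

func (c *updateCache[T]) Set(v T) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.last = v
}

func (c *updateCache[T]) Get() T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.last
}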
@@ -1,165 +0,0 @@
package light_client_test

import (
"testing"

"github.com/OffchainLabs/prysm/v6/async/event"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestLightClientStore(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
params.OverrideBeaconConfig(cfg)

// Initialize the light client store
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))

// Create test light client updates for Capella and Deneb
lCapella := util.NewTestLightClient(t, version.Capella)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")

lDeneb := util.NewTestLightClient(t, version.Deneb)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")

// Initially the store should have nil values for both updates
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get finality with Capella update. Optimistic update should be nil
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get optimistic with Capella update. Finality update should be Capella
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")

// Set and get finality and optimistic with Deneb update
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
}

func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))

// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
// update0 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
// update1 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
// update2 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
// update3 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")

// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
// update4 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
// update5 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
// update6 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}
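The assertions above pin down when a finality update may be re-broadcast. A hedged reconstruction of the gate they imply (the real rule lives in IsFinalityUpdateValidForBroadcast; this is only inferred from the test):

package lightclient

// validForBroadcast is inferred from the test assertions: broadcast on the
// first update, whenever the finalized slot advances, or on the first
// supermajority seen for the current finalized slot.
func validForBroadcast(havePrev bool, newFinalizedSlot, oldFinalizedSlot uint64, newSupermajority, oldSupermajority bool) bool {
	if !havePrev {
		return true
	}
	if newFinalizedSlot > oldFinalizedSlot {
		return true
	}
	return newFinalizedSlot == oldFinalizedSlot && newSupermajority && !oldSupermajority
}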
@@ -19,11 +19,11 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@@ -4,15 +4,10 @@ import (
"encoding/binary"
"math"
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/holiman/uint256"
"github.com/pkg/errors"
@@ -20,12 +15,9 @@ import (

var (
// Custom errors
ErrCustodyGroupTooLarge = errors.New("custody group too large")
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
ErrCustodyGroupTooLarge = errors.New("custody group too large")
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")

// maxUint256 is the maximum value of an uint256.
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
@@ -117,44 +109,6 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
return columns, nil
}

// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
return nil, nil
}

block := signedBlock.Block()
blockBody := block.Body()
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}

if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrSizeMismatch
}

signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}

kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof KZG commitments")
}

dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}

return dataColumnSidecars, nil
}

// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the inverse of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
@@ -194,72 +148,3 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {

return columns, nil
}

// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs, and the cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
func dataColumnsSidecars(
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
blobKzgCommitments [][]byte,
kzgCommitmentsInclusionProof [][]byte,
cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
start := time.Now()
if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrSizeMismatch
}

numberOfColumns := params.BeaconConfig().NumberOfColumns

blobsCount := len(cellsAndProofs)
sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
for columnIndex := range numberOfColumns {
column := make([]kzg.Cell, 0, blobsCount)
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

for rowIndex := range blobsCount {
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs

// Validate that we have enough cells and proofs for this column index
if columnIndex >= uint64(len(cellsForRow)) {
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
}
if columnIndex >= uint64(len(proofsForRow)) {
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
}

cell := cellsForRow[columnIndex]
column = append(column, cell)

kzgProof := proofsForRow[columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}

columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
columnBytes = append(columnBytes, column[i][:])
}

kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
}

sidecar := &ethpb.DataColumnSidecar{
Index: columnIndex,
Column: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProofs: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}

sidecars = append(sidecars, sidecar)
}

dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return sidecars, nil
}
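dataColumnsSidecars is essentially a transposition: cellsAndProofs is indexed by blob (row), while sidecars are indexed by column, so column c of every row is gathered into sidecar c. A toy illustration with strings standing in for KZG cells:

package main

import "fmt"

// Toy version of the cells-to-columns transposition in dataColumnsSidecars:
// row r holds the cells of blob r; sidecar c gathers column c of every row.
func main() {
	rows := [][]string{
		{"b0c0", "b0c1", "b0c2"}, // cells of blob 0
		{"b1c0", "b1c1", "b1c2"}, // cells of blob 1
	}
	const numColumns = 3
	for c := 0; c < numColumns; c++ {
		column := make([]string, 0, len(rows))
		for r := range rows {
			column = append(column, rows[r][c])
		}
		fmt.Printf("sidecar %d: %v\n", c, column)
	}
}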
@@ -3,13 +3,9 @@ package peerdas_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
@@ -31,93 +27,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nil signed block", func(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("empty cells and proofs", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 1)
|
||||
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
|
||||
t.Run("cells array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with insufficient cells for the number of columns.
|
||||
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, 10), // Only 10 cells
|
||||
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail because the function will try to access columns up to NumberOfColumns
|
||||
// but we only have 10 cells/proofs.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds cells length", err)
|
||||
})
|
||||
|
||||
t.Run("proofs array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail when trying to access proof beyond index 4.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds proofs length", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()

@@ -63,17 +63,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("invalid proof", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})

t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.NoError(t, err)
})
}
@@ -256,9 +253,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for i := range int64(b.N) {
// Generate new random sidecars to ensure the KZG backend does not cache anything.
sidecars := generateRandomSidecars(b, i, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)

for _, sidecar := range roDataColumnSidecars {
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
@@ -282,7 +278,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
b.ResetTimer()

for j := range int64(b.N) {
allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
allSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for k := int64(0); k < numberOfColumns; k += columnsCount {
// Use different seeds to generate different blobs/commitments
seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
@@ -292,10 +288,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
}

roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)

b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
b.StopTimer()
require.NoError(b, err)
}
@@ -323,8 +317,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for j := range int64(batchCount) {
// Use different seeds to generate different blobs/commitments
sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
allSidecars = append(allSidecars, roDataColumnSidecars)
allSidecars = append(allSidecars, sidecars)
}

for _, sidecars := range allSidecars {
@@ -358,7 +351,7 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
return roSidecar
}

func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.RODataColumn {
dbBlock := util.NewBeaconBlockDeneb()

commitments := make([][]byte, 0, blobCount)
@@ -379,20 +372,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataCo
require.NoError(t, err)

cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
rob, err := blocks.NewROBlock(sBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)

return sidecars
}

func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
for _, sidecar := range sidecars {
roCol, err := blocks.NewRODataColumn(sidecar)
require.NoError(t, err)

roDataColumnSidecars = append(roDataColumnSidecars, roCol)
}

return roDataColumnSidecars
}

@@ -5,7 +5,7 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@@ -16,6 +16,7 @@ var (
ErrBlobIndexTooHigh = errors.New("blob index is too high")
ErrBlockRootMismatch = errors.New("block root mismatch")
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
ErrNilBlobAndProof = errors.New("nil blob and proof")
)

// MinimumColumnCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
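// For context on the threshold this helper encodes (its body is elided from this hunk): the
// cells of each blob are Reed-Solomon extended by a factor of two across the columns, so any
// half of the columns suffices to recover the rest. A minimal sketch, assuming the usual
// PeerDAS parameters, would reduce to:
//
//	minimum := params.BeaconConfig().NumberOfColumns / 2 // e.g. 128 / 2 = 64 columns
//
// The line above illustrates the invariant; it is not the elided implementation.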
@@ -28,19 +29,19 @@ func MinimumColumnCountToReconstruct() uint64 {
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// Check if there is at least one input sidecar.
if len(inVerifiedRoSidecars) == 0 {
if len(verifiedRoSidecars) == 0 {
return nil, ErrNotEnoughDataColumnSidecars
}

// Safely retrieve the first sidecar as a reference.
referenceSidecar := inVerifiedRoSidecars[0]
referenceSidecar := verifiedRoSidecars[0]

// Check if all columns have the same length and are committed to the same block.
blobCount := len(referenceSidecar.Column)
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range inVerifiedRoSidecars[1:] {
for _, sidecar := range verifiedRoSidecars[1:] {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -51,8 +52,8 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
}

// Deduplicate sidecars.
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(inVerifiedRoSidecars))
for _, inVerifiedRoSidecar := range inVerifiedRoSidecars {
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
}

@@ -62,12 +63,6 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
return nil, ErrNotEnoughDataColumnSidecars
}

// Sidecars are verified and are committed to the same block.
// All signed block headers, KZG commitments, and inclusion proofs are the same.
signedBlockHeader := referenceSidecar.SignedBlockHeader
kzgCommitments := referenceSidecar.KzgCommitments
kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof

// Recover cells and compute proofs in parallel.
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -100,78 +95,20 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
}

outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))
if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items")
}

// Input sidecars are verified, and we reconstructed the missing sidecars ourselves.
// As a consequence, reconstructed sidecars are also verified.
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
reconstructedVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
for _, sidecar := range outSidecars {
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "new RO data column with root")
}

verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(sidecar)
reconstructedVerifiedRoSidecars = append(reconstructedVerifiedRoSidecars, verifiedRoSidecar)
}

return outVerifiedRoSidecars, nil
}

// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
// construct data column sidecars from data obtained from the execution client via:
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
// cell proofs are "flattened".
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
// Check if the cells count is equal to the cell proofs count.
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))

cellsCount := blobCount * numberOfColumns
if cellsCount != cellProofsCount {
return nil, ErrBlobsCellsProofsMismatch
}

cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}

dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}

return dataColumnSidecars, nil
return reconstructedVerifiedRoSidecars, nil
}
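// For context on the "flattened" layout consumed above: the cell proofs of all blobs are
// concatenated into a single slice, with blob i owning the contiguous window
// [i*numberOfColumns, (i+1)*numberOfColumns), exactly as the inner copy loop indexes it.
// A minimal sketch of that index math (flatProofIndex is an illustrative name, not part of
// the package):
//
//	func flatProofIndex(blobIndex, columnIndex, numberOfColumns uint64) uint64 {
//		// Position of the proof for cell (blobIndex, columnIndex) in the flat slice.
//		return blobIndex*numberOfColumns + columnIndex
//	}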

// ReconstructBlobs constructs verified read-only blob sidecars from verified read-only data column sidecars.
@@ -256,6 +193,89 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
return blobSidecars, nil
}

// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and flattened cell proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))

cellsCount := blobCount * numberOfColumns
if cellsCount != cellProofsCount {
return nil, ErrBlobsCellsProofsMismatch
}

cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}

return cellsAndProofs, nil
}
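// A short usage sketch for the function above, mirroring how the tests later in this diff
// flatten proofs before calling it (variable names are illustrative):
//
//	cellProofs := make([][]byte, 0, len(blobs)*int(params.BeaconConfig().NumberOfColumns))
//	for _, cp := range cellsAndProofs {
//		for _, proof := range cp.Proofs {
//			cellProofs = append(cellProofs, proof[:]) // one proof per extended cell
//		}
//	}
//	recomputed, err := ComputeCellsAndProofsFromFlat(blobs, cellProofs)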

// ComputeCellsAndProofsFromStructured computes the cells and proofs from structured blob and proof pairs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns

cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, ErrNilBlobAndProof
}

var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, numberOfColumns*kzg.BytesPerProof)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, errors.New("wrong KZG proof size - should never happen")
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}

return cellsAndProofs, nil
}
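// Unlike the flat variant above, each *pb.BlobAndProofV2 already groups a blob with its own
// KzgProofs slice, so no global index math is needed; the function only validates sizes and
// recomputes the extended cells. A minimal sketch of the input shape, as exercised by the
// tests later in this diff:
//
//	blobAndProof := &pb.BlobAndProofV2{
//		Blob:      blob,      // the (non-extended) blob bytes
//		KzgProofs: kzgProofs, // one kzg.BytesPerProof-sized proof per extended cell
//	}
//	cellsAndProofs, err := ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{blobAndProof})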

// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
referenceSidecar := dataColumnSidecars[0]

@@ -9,7 +9,7 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/pkg/errors"
@@ -124,50 +124,6 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
})
}

func TestConstructDataColumnSidecars(t *testing.T) {
const (
blobCount = 3
cellsPerBlob = fieldparams.CellsPerBlob
)

numberOfColumns := params.BeaconConfig().NumberOfColumns

// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)

// Extract blobs and proofs from the sidecars.
blobs := make([][]byte, 0, blobCount)
cellProofs := make([][]byte, 0, cellsPerBlob)
for blobIndex := range blobCount {
blob := make([]byte, 0, cellsPerBlob)
for columnIndex := range cellsPerBlob {
cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
blob = append(blob, cell...)
}

blobs = append(blobs, blob)

for columnIndex := range numberOfColumns {
cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
cellProofs = append(cellProofs, cellProof)
}
}

actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
require.NoError(t, err)

// Extract the base verified ro sidecars into sidecars.
expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
}

require.DeepSSZEqual(t, expected, actual)
}

func TestReconstructBlobs(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
@@ -250,7 +206,7 @@ func TestReconstructBlobs(t *testing.T) {
// Compute cells and proofs from blob sidecars.
var wg errgroup.Group
blobs := make([][]byte, blobCount)
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for i := range blobCount {
blob := roBlobSidecars[i].Blob
blobs[i] = blob
@@ -267,7 +223,7 @@ func TestReconstructBlobs(t *testing.T) {

// It is safe for multiple goroutines to concurrently write to the same slice,
// as long as they are writing to different indices, which is the case here.
cellsAndProofs[i] = cp
inputCellsAndProofs[i] = cp

return nil
})
@@ -278,25 +234,24 @@ func TestReconstructBlobs(t *testing.T) {

// Flatten proofs.
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, cp := range cellsAndProofs {
for _, cp := range inputCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}

// Construct data column sidecars.
// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
// `TestConstructDataColumnSidecars` tests pass.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.NoError(t, err)

// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)

// Convert to verified data column sidecars.
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
require.NoError(t, err)

verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
for _, roDataColumnSidecar := range roDataColumnSidecars {
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
}

@@ -339,3 +294,162 @@ func TestReconstructBlobs(t *testing.T) {
})

}

func TestComputeCellsAndProofsFromFlat(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

t.Run("mismatched blob and proof counts", func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Create one blob but proofs for two blobs
blobs := [][]byte{{}}

// Create proofs for 2 blobs worth of columns
cellProofs := make([][]byte, 2*numberOfColumns)

_, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
})

t.Run("nominal", func(t *testing.T) {
const blobCount = 2
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

// Extract blobs and compute expected cells and proofs
blobs := make([][]byte, blobCount)
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
var wg errgroup.Group

for i := range blobCount {
blob := roBlobSidecars[i].Blob
blobs[i] = blob

wg.Go(func() error {
var kzgBlob kzg.Blob
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)

cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}

expectedCellsAndProofs[i] = cp
return nil
})
}

err := wg.Wait()
require.NoError(t, err)

// Flatten proofs
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, cp := range expectedCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}

// Test ComputeCellsAndProofsFromFlat
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsAndProofs))

// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))

// Compare cells
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
}

// Compare proofs
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
}
}
})
}

func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
})

t.Run("nominal", func(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

const blobCount = 2

// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

// Extract blobs and compute expected cells and proofs
blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount)
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)

var wg errgroup.Group
for i := range blobCount {
blob := roBlobSidecars[i].Blob

wg.Go(func() error {
var kzgBlob kzg.Blob
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)

cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}
expectedCellsAndProofs[i] = cellsAndProofs

kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs))
for _, proof := range cellsAndProofs.Proofs {
kzgProofs = append(kzgProofs, proof[:])
}

blobAndProof := &pb.BlobAndProofV2{
Blob: blob,
KzgProofs: kzgProofs,
}
blobsAndProofs[i] = blobAndProof

return nil
})
}

err = wg.Wait()
require.NoError(t, err)

// Test ComputeCellsAndProofsFromStructured
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsAndProofs))

// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))

// Compare cells
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
}

// Compare proofs
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
}
}
})
}

@@ -1,12 +1,76 @@
package peerdas

import (
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)

var (
ErrNilSignedBlockOrEmptyCellsAndProofs = errors.New("nil signed block or empty cells and proofs")
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
)

var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
)

const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
)

type (
// ConstructionPopulator is an interface that can be satisfied by a type that can use data from a struct
// like a DataColumnSidecar or a BeaconBlock to set the fields in a data column sidecar that cannot
// be obtained from the engine api.
ConstructionPopulator interface {
Slot() primitives.Slot
Root() [fieldparams.RootLength]byte
ProposerIndex() primitives.ValidatorIndex
Commitments() ([][]byte, error)
Type() string

extract() (*blockInfo, error)
}

// BlockReconstructionSource is a ConstructionPopulator that uses a beacon block as the source of data
BlockReconstructionSource struct {
blocks.ROBlock
}

// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
SidecarReconstructionSource struct {
blocks.VerifiedRODataColumn
}

blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
kzgInclusionProof [][]byte
}
)

// PopulateFromBlock creates a BlockReconstructionSource from a beacon block
func PopulateFromBlock(block blocks.ROBlock) *BlockReconstructionSource {
return &BlockReconstructionSource{ROBlock: block}
}

// PopulateFromSidecar creates a SidecarReconstructionSource from a data column sidecar
func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstructionSource {
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
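// Taken together, the two constructors above give DataColumnSidecars its two entry points. A
// short usage sketch based on call sites elsewhere in this diff (error handling elided):
//
//	// From a block, e.g. when the cells and proofs come from engine_getPayloadV5:
//	rob, _ := blocks.NewROBlock(signedBlock)
//	sidecars, _ := DataColumnSidecars(cellsAndProofs, PopulateFromBlock(rob))
//
//	// From an already-verified sidecar, e.g. during reconstruction:
//	sidecars, _ = DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))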

// ValidatorsCustodyRequirement returns the number of custody groups required for the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -28,3 +92,158 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
count := totalNodeBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}
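// A worked example of the clamp above, with illustrative numbers (the constants themselves are
// elided from this hunk): if the node's total attached balance divided by the per-group balance
// yields count = 3 while validatorCustodyRequirement = 8, the result is lifted to 8; a very
// large count is instead capped at numberOfCustodyGroups (128, assuming mainnet parameters):
//
//	min(max(3, 8), 128) == 8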

// DataColumnSidecars, given a ConstructionPopulator and the cells/proofs associated with each blob in the
// block, assembles sidecars which can be distributed to peers.
// This is an adapted version of
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars,
// which is designed to be used both when constructing sidecars from a block and from a sidecar, replacing
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
if len(rows) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}

maxIdx := params.BeaconConfig().NumberOfColumns
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
for idx := range maxIdx {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgCommitments: info.kzgCommitments,
KzgProofs: proofs[idx],
SignedBlockHeader: info.signedBlockHeader,
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
}

if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}

roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}

dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}

// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
}

// ProposerIndex returns the proposer index of the source
func (s *BlockReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return s.Block().ProposerIndex()
}

// Commitments returns the blob KZG commitments of the source
func (s *BlockReconstructionSource) Commitments() ([][]byte, error) {
c, err := s.Block().Body().BlobKzgCommitments()

if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}

return c, nil
}

// Type returns the type of the source
func (s *BlockReconstructionSource) Type() string {
return BlockType
}

// extract extracts the block information from the source
func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
block := b.Block()

header, err := b.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}

commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "commitments")
}

inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
if err != nil {
return nil, errors.Wrap(err, "merkle proof kzg commitments")
}

info := &blockInfo{
signedBlockHeader: header,
kzgCommitments: commitments,
kzgInclusionProof: inclusionProof,
}

return info, nil
}

// rotateRowsToCols takes a 2D slice of cells and proofs, where x is rows (blobs) and y is columns,
// and returns a 2D slice where x is columns and y is rows.
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
if len(rows) == 0 {
return nil, nil, nil
}
cellCols := make([][][]byte, numCols)
proofCols := make([][][]byte, numCols)
for i, cp := range rows {
if uint64(len(cp.Cells)) != numCols {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
}
if len(cp.Cells) != len(cp.Proofs) {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
}
for j := uint64(0); j < numCols; j++ {
if i == 0 {
cellCols[j] = make([][]byte, len(rows))
proofCols[j] = make([][]byte, len(rows))
}
cellCols[j][i] = cp.Cells[j][:]
proofCols[j][i] = cp.Proofs[j][:]
}
}
return cellCols, proofCols, nil
}
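// A worked example of the rotation above, with two blobs (rows) and numCols = 4:
//
//	rows[0].Cells = [a0 a1 a2 a3]        cellCols[0] = [a0 b0]
//	rows[1].Cells = [b0 b1 b2 b3]  ==>   cellCols[1] = [a1 b1]
//	                                     cellCols[2] = [a2 b2]
//	                                     cellCols[3] = [a3 b3]
//
// so in DataColumnSidecars the sidecar at index idx receives cells[idx] (one cell per blob)
// together with the matching proofs[idx].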

// Root returns the block root of the source
func (s *SidecarReconstructionSource) Root() [fieldparams.RootLength]byte {
return s.BlockRoot()
}

// Commitments returns the blob KZG commitments of the source
func (s *SidecarReconstructionSource) Commitments() ([][]byte, error) {
return s.KzgCommitments, nil
}

// Type returns the type of the source
func (s *SidecarReconstructionSource) Type() string {
return SidecarType
}

// extract extracts the block information from the source
func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
info := &blockInfo{
signedBlockHeader: s.SignedBlockHeader,
kzgCommitments: s.KzgCommitments,
kzgInclusionProof: s.KzgCommitmentsInclusionProof,
}

return info, nil
}

@@ -3,11 +3,15 @@ package peerdas_test
import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestValidatorsCustodyRequirement(t *testing.T) {
@@ -53,3 +57,218 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
})
}
}

func TestDataColumnSidecars(t *testing.T) {
t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
},
}

rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})

t.Run("cells array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}

// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})

t.Run("proofs array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}

// This should fail when trying to access proof beyond index 4.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
require.ErrorContains(t, "not enough proofs", err)
})

t.Run("nominal", func(t *testing.T) {
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)
commitment2 := make([]byte, 48)

// Set different values to distinguish commitments
commitment1[0] = 0x01
commitment2[0] = 0x02
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}

// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
}

rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))

// Verify each sidecar has the expected structure
for i, sidecar := range sidecars {
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 2, len(sidecar.Column))
require.Equal(t, 2, len(sidecar.KzgCommitments))
require.Equal(t, 2, len(sidecar.KzgProofs))

// Verify commitments match what we set
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])

// Verify column data comes from the correct cells
require.Equal(t, byte(i), sidecar.Column[0][0])
require.Equal(t, byte(i+128), sidecar.Column[1][0])

// Verify proofs come from the correct proof rows
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
}
})
}

func TestReconstructionSource(t *testing.T) {
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)
commitment2 := make([]byte, 48)

// Set different values to distinguish commitments
commitment1[0] = 0x01
commitment2[0] = 0x02
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}

// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
}

rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))

t.Run("from block", func(t *testing.T) {
src := peerdas.PopulateFromBlock(rob)
require.Equal(t, rob.Block().Slot(), src.Slot())
require.Equal(t, rob.Root(), src.Root())
require.Equal(t, rob.Block().ProposerIndex(), src.ProposerIndex())

commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, commitment1, commitments[0])
require.DeepEqual(t, commitment2, commitments[1])

require.Equal(t, peerdas.BlockType, src.Type())
})

t.Run("from sidecar", func(t *testing.T) {
referenceSidecar := blocks.NewVerifiedRODataColumn(sidecars[0])
src := peerdas.PopulateFromSidecar(referenceSidecar)
require.Equal(t, referenceSidecar.Slot(), src.Slot())
require.Equal(t, referenceSidecar.BlockRoot(), src.Root())
require.Equal(t, referenceSidecar.ProposerIndex(), src.ProposerIndex())

commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, commitment1, commitments[0])
require.DeepEqual(t, commitment2, commitments[1])

require.Equal(t, peerdas.SidecarType, src.Type())
})
}

@@ -122,18 +122,18 @@ type BlobStorage struct {
func (bs *BlobStorage) WarmCache() {
start := time.Now()
if bs.layoutName == LayoutNameFlat {
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
log.Info("Blob filesystem cache warm-up started. This may take a few minutes")
} else {
log.Info("Blob filesystem cache warm-up started.")
log.Info("Blob filesystem cache warm-up started")
}

if err := warmCache(bs.layout, bs.cache); err != nil {
log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
log.WithError(err).Error("Error encountered while warming up blob filesystem cache")
}
if err := bs.migrateLayouts(); err != nil {
log.WithError(err).Error("Error encountered while migrating blob storage.")
log.WithError(err).Error("Error encountered while migrating blob storage")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
}

// If any blob storage directories are found for layouts besides the configured layout, migrate them.

@@ -259,7 +259,6 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
}

// Save saves data column sidecars into the database and asynchronously performs pruning.
// The returned channel is closed when the pruning is complete.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
startTime := time.Now()

@@ -4,17 +4,26 @@ import (
"bytes"
"context"
"encoding/binary"
"time"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)

var errTimeOut = errors.New("operation timed out")

// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
_ context.Context, maxEpoch primitives.Epoch,
ctx context.Context, maxEpoch primitives.Epoch,
) (numPruned uint, err error) {
// In some cases, pruning may take a very long time and consume significant memory in the
// open Update transaction. Therefore, we impose a 1 minute timeout on this operation.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()

// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(encodedEndPruneEpoch, uint64(maxEpoch))
@@ -48,13 +57,18 @@ func (s *Store) PruneAttestationsAtEpoch(
return
}

if err = s.db.Update(func(tx *bolt.Tx) error {
err = s.db.Update(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
c := signingRootsBkt.Cursor()

// We begin a pruning iteration starting from the first item in the bucket.
for k, v := c.First(); k != nil; k, v = c.Next() {
if ctx.Err() != nil {
// Exit the routine if the context has expired.
return errTimeOut
}

// We check the epoch from the current key in the database.
// If we have hit an epoch that is greater than the end epoch of the pruning process,
// we then completely exit the process as we are done.
@@ -67,18 +81,27 @@ func (s *Store) PruneAttestationsAtEpoch(
// so it is possible we have a few adjacent objects that have the same slot, such as
// (target_epoch = 3 ++ _) => encode(attestation)
if err := signingRootsBkt.Delete(k); err != nil {
return err
return errors.Wrap(err, "delete attestation signing root")
}
if err := attRecordsBkt.Delete(v); err != nil {
return err
return errors.Wrap(err, "delete attestation record")
}
slasherAttestationsPrunedTotal.Inc()
numPruned++
}
return nil
}); err != nil {
})

if errors.Is(err, errTimeOut) {
log.Warning("Aborting pruning routine")
return
}

if err != nil {
log.WithError(err).Error("Failed to prune attestations")
return
}

return
}
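// The refactor above separates the two failure modes of the Update transaction: returning the
// errTimeOut sentinel from inside the closure rolls the bbolt transaction back, and errors.Is
// then lets the caller log a mere warning for the expected timeout while reserving the error
// log for genuine failures. A minimal sketch of the pattern, with illustrative names:
//
//	err := db.Update(func(tx *bolt.Tx) error {
//		if ctx.Err() != nil {
//			return errSentinel // aborts and rolls back the transaction
//		}
//		return nil
//	})
//	if errors.Is(err, errSentinel) {
//		// deadline hit: expected, not a storage failure
//	}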

@@ -25,6 +25,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -103,6 +104,7 @@ go_test(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",

@@ -7,6 +7,7 @@ import (
"strings"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -101,10 +102,7 @@ const (
defaultEngineTimeout = time.Second
)

var (
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
)
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")

// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
@@ -123,7 +121,7 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
}

// EngineCaller defines a client that can interact with an Ethereum
@@ -144,10 +142,9 @@ var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
defer func() {
defer func(start time.Time) {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
}(time.Now())
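// Note on the deferred call above: the argument to a deferred function is evaluated when the
// defer statement executes (at function entry here), so }(time.Now()) snapshots the start time
// without the separate start variable the old version declared; the latency observation itself
// still runs when NewPayload returns.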
|
||||
|
||||
d := time.Now().Add(time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
@@ -185,7 +182,10 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
return nil, errors.New("unknown execution data type")
|
||||
}
|
||||
if result.ValidationError != "" {
|
||||
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
|
||||
log.WithField("status", result.Status.String()).
|
||||
WithField("parentRoot", fmt.Sprintf("%#x", parentBlockRoot)).
|
||||
WithError(errors.New(result.ValidationError)).
|
||||
Error("Got a validation error in newPayload")
|
||||
}
|
||||
switch result.Status {
|
||||
case pb.PayloadStatus_INVALID_BLOCK_HASH:
|
||||
@@ -197,7 +197,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
case pb.PayloadStatus_VALID:
|
||||
return result.LatestValidHash, nil
|
||||
default:
|
||||
return nil, ErrUnknownPayloadStatus
|
||||
return nil, errors.Wrapf(ErrUnknownPayloadStatus, "unknown payload status: %s", result.Status.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -651,22 +651,40 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
block := signedROBlock.Block()
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
root := populator.Root()

log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": block.Slot(),
})

kzgCommitments, err := block.Body().BlobKzgCommitments()
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
commitments, err := populator.Commitments()
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
return nil, wrapWithBlockRoot(err, root, "commitments")
}

cellsAndProofs, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
if err != nil {
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}

// Return early if nothing is returned from the EL.
if len(cellsAndProofs) == 0 {
return nil, nil
}

// Construct data column sidecars from the signed block and cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from cells and proofs")
}

// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)

return verifiedROSidecars, nil
}
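For reference, a caller now drives this through a populator rather than passing the block and root directly; a minimal sketch, assuming the peerdas.PopulateFromBlock helper exercised in the updated tests below (signedBlock and blockRoot are hypothetical inputs):

// Sketch: construct verified data column sidecars for a block via the EL.
roBlock, err := blocks.NewROBlockWithRoot(signedBlock, blockRoot)
if err != nil {
	return err
}
sidecars, err := s.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
if err != nil {
	return err
}
if len(sidecars) == 0 {
	// The EL returned nothing; callers fall back to recovering columns elsewhere.
}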

// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using the engine_getBlobsV2 execution API method).
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -677,47 +695,32 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
return nil, errors.Wrapf(err, "get blobs V2")
}

// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
log.Debug("No blobs returned from execution client")
return nil, nil
}

// Extract the blobs and proofs from the blobAndProofV2s.
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
for _, blobsAndProofs := range blobAndProofV2s {
if blobsAndProofs == nil {
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
}

blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
}

// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
return nil, errors.Wrap(err, "compute cells and proofs")
}

// Finally, construct verified RO data column sidecars.
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}
return cellsAndProofs, nil
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
func upgradeSidecarsToVerifiedSidecars(roSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, roSidecar := range roSidecars {
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}

log.Debug("Data columns successfully reconstructed from the execution client")

return verifiedRODataColumns, nil
return verifiedRODataColumns
}

func fullPayloadFromPayloadBody(
@@ -1009,6 +1012,6 @@ func toBlockNumArg(number *big.Int) string {
}

// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
func wrapWithBlockRoot(err error, blockRoot [fieldparams.RootLength]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
}
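A quick sketch of the resulting error text, with an illustrative zero-valued root and cause:

// The message and block root are front-loaded onto the wrapped cause.
var root [fieldparams.RootLength]byte
err := wrapWithBlockRoot(errors.New("timeout"), root, "get blobs V2")
// err.Error() => "get blobs V2 for block 0x0000...0000: timeout"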

@@ -14,6 +14,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -927,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(BlockByNumberMethod, func(t *testing.T) {
@@ -2556,7 +2557,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
})
}

func TestReconstructDataColumnSidecars(t *testing.T) {
func TestConstructDataColumnSidecars(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
@@ -2580,11 +2581,14 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

roBlock, err := blocks.NewROBlockWithRoot(sb, r)
require.NoError(t, err)

ctx := context.Background()

t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})

t.Run("nothing received", func(t *testing.T) {
@@ -2594,7 +2598,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2607,23 +2611,22 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

t.Run("missing some blobs", func(t *testing.T) {
blobMasks := []bool{false, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()
// t.Run("missing some blobs", func(t *testing.T) {
// blobMasks := []bool{false, true, true, true, true, true}
// srv := createBlobServerV2(t, 6, blobMasks)
// defer srv.Close()

rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// rpcClient, client := setupRpcClientV2(t, srv.URL, client)
// defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
require.Equal(t, 0, len(dataColumns))
})
// _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
// require.ErrorContains(t, "fetch cells and proofs from execution client", err)
// })
}

func createRandomKzgCommitments(t *testing.T, num int) [][]byte {

@@ -14,6 +14,7 @@ go_library(
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

@@ -4,6 +4,7 @@ import (
"context"
"math/big"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -116,7 +117,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
return e.BlobSidecars, e.ErrorBlobSidecars
}

func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}

@@ -463,22 +463,20 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
}

// InsertChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also, we assume that the parent of the
// last block in this list is already included in forkchoice store.
// `blocks`. This slice must be ordered in increasing slot order and
// each consecutive entry must be a child of the previous one.
// The parent of the first block in this list must already be present in forkchoice.
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
for i := len(chain) - 1; i > 0; i-- {
for _, bcp := range chain {
if _, err := f.store.insert(ctx,
chain[i].Block,
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
bcp.Block,
bcp.JustifiedCheckpoint.Epoch, bcp.FinalizedCheckpoint.Epoch); err != nil {
return err
}
if err := f.updateCheckpoints(ctx, chain[i].JustifiedCheckpoint, chain[i].FinalizedCheckpoint); err != nil {
if err := f.updateCheckpoints(ctx, bcp.JustifiedCheckpoint, bcp.FinalizedCheckpoint); err != nil {
return err
}
}
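Under the new contract, callers build the slice oldest-first; a minimal sketch, where b1, b2, b3 (each the parent of the next) and the checkpoints jc, fc are hypothetical placeholders:

// chain is ordered by increasing slot; the parent of chain[0] is already in forkchoice.
chain := []*forkchoicetypes.BlockAndCheckpoints{
	{Block: b1, JustifiedCheckpoint: jc, FinalizedCheckpoint: fc},
	{Block: b2, JustifiedCheckpoint: jc, FinalizedCheckpoint: fc},
	{Block: b3, JustifiedCheckpoint: jc, FinalizedCheckpoint: fc},
}
if err := f.InsertChain(ctx, chain); err != nil {
	return err
}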

@@ -630,14 +630,15 @@ func TestStore_InsertChain(t *testing.T) {
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
}
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
for i := 0; i < len(blks); i++ {
args[i] = blks[10-i-1]
}
require.NoError(t, f.InsertChain(t.Context(), args))
// InsertChain now expects blocks in increasing slot order
require.NoError(t, f.InsertChain(t.Context(), blks))

// Test partial insertion: first insert the foundation blocks, then a subset
f = setup(1, 1)
require.NoError(t, f.InsertChain(t.Context(), args[2:]))
// Insert first 2 blocks to establish a chain from genesis
require.NoError(t, f.InsertChain(t.Context(), blks[:2]))
// Then insert the remaining blocks
require.NoError(t, f.InsertChain(t.Context(), blks[2:]))
}

func TestForkChoice_UpdateCheckpoints(t *testing.T) {

@@ -3,11 +3,12 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cache.go",
"helpers.go",
"lightclient.go",
"store.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client",
visibility = ["//visibility:public"],
deps = [
"//async/event:go_default_library",
@@ -38,25 +39,30 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cache_test.go",
"lightclient_test.go",
"store_test.go",
],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//async/event:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
26
beacon-chain/light-client/cache.go
Normal file
@@ -0,0 +1,26 @@
package light_client

import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// cache tracks LC data over the non-finalized chain for different branches.
type cache struct {
items map[[32]byte]*cacheItem
}

// cacheItem represents the LC data for a block. It tracks the best update and finality update seen in that branch.
type cacheItem struct {
parent *cacheItem // parent item in the cache, can be nil
period uint64 // sync committee period
slot primitives.Slot // slot of the signature block
bestUpdate interfaces.LightClientUpdate
bestFinalityUpdate interfaces.LightClientFinalityUpdate
}

func newLightClientCache() *cache {
return &cache{
items: make(map[[32]byte]*cacheItem),
}
}
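Items form per-branch linked lists through the parent pointer, so the best data for any fork can be recovered by walking from a head root; a minimal sketch using the types above (the roots are illustrative):

c := newLightClientCache()
parent := &cacheItem{period: 7}
child := &cacheItem{period: 7, parent: parent}
c.items[[32]byte{1}] = parent
c.items[[32]byte{2}] = child

// Walk one branch from its head back toward the finalized boundary.
for item := c.items[[32]byte{2}]; item != nil; item = item.parent {
	_ = item.bestUpdate // the first item seen per period carries that period's best update
}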
24
beacon-chain/light-client/cache_test.go
Normal file
@@ -0,0 +1,24 @@
package light_client

import (
"testing"

"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestLCCache(t *testing.T) {
lcCache := newLightClientCache()
require.NotNil(t, lcCache)

item := &cacheItem{
period: 5,
bestUpdate: nil,
bestFinalityUpdate: nil,
}

blkRoot := [32]byte{4, 5, 6}

lcCache.items[blkRoot] = item

require.Equal(t, item, lcCache.items[blkRoot], "Expected to find the item in the cache")
}
@@ -592,6 +592,10 @@ func HasFinality(update interfaces.LightClientUpdate) (bool, error) {
}

func IsBetterUpdate(newUpdate, oldUpdate interfaces.LightClientUpdate) (bool, error) {
if oldUpdate == nil || oldUpdate.IsNil() {
return true, nil
}

maxActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Len()
newNumActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Count()
oldNumActiveParticipants := oldUpdate.SyncAggregate().SyncCommitteeBits.Count()
@@ -778,7 +782,7 @@ func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClie
// This does not concern broadcasting, but rather the decision of whether to save the new update.
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
if oldUpdate == nil || oldUpdate.IsNil() {
return true
}

@@ -804,7 +808,7 @@ func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityU
}

func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
if oldUpdate == nil {
if oldUpdate == nil || oldUpdate.IsNil() {
return true
}
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
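The added IsNil() guard matters because a Go interface holding a typed nil pointer does not compare equal to nil; a self-contained sketch of the pitfall (the types here are illustrative, not Prysm's):

package main

import "fmt"

type update interface{ IsNil() bool }

type concreteUpdate struct{}

func (u *concreteUpdate) IsNil() bool { return u == nil }

func main() {
	var p *concreteUpdate    // nil concrete pointer
	var old update = p       // non-nil interface wrapping a nil pointer
	fmt.Println(old == nil)  // false: the plain nil check alone would miss this
	fmt.Println(old.IsNil()) // true: the added guard catches it
}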
@@ -9,7 +9,7 @@ import (
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/runtime/version"

lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
consensustypes "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
383
beacon-chain/light-client/store.go
Normal file
@@ -0,0 +1,383 @@
package light_client

import (
"context"
"sync"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)

var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")

type Store struct {
mu sync.RWMutex

beaconDB iface.HeadAccessDatabase
lastFinalityUpdate interfaces.LightClientFinalityUpdate // tracks the best finality update seen so far
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
p2p p2p.Accessor
stateFeed event.SubscriberSender
cache *cache // non-finality cache
}

func NewLightClientStore(p p2p.Accessor, e event.SubscriberSender, db iface.HeadAccessDatabase) *Store {
return &Store{
beaconDB: db,
p2p: p,
stateFeed: e,
cache: newLightClientCache(),
}
}

func (s *Store) SaveLCData(ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
headBlockRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()

// compute required data
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return errors.Wrapf(err, "failed to create light client update")
}
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return errors.Wrapf(err, "failed to create light client finality update")
}
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock)
if err != nil {
return errors.Wrapf(err, "failed to create light client optimistic update")
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
blockRoot, err := attestedBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrapf(err, "failed to compute attested block root")
}
parentRoot := [32]byte(update.AttestedHeader().Beacon().ParentRoot)
signatureBlockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return errors.Wrapf(err, "failed to compute signature block root")
}

newBlockIsHead := signatureBlockRoot == headBlockRoot

// create the new cache item
newCacheItem := &cacheItem{
period: period,
slot: attestedBlock.Block().Slot(),
}

// check if parent exists in cache
parentItem, ok := s.cache.items[parentRoot]
if ok {
newCacheItem.parent = parentItem
} else {
// if not, create an item for the parent; we don't need to save it, since it only holds the accumulated best updates and is just used for comparison
bestUpdateSoFar, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return errors.Wrapf(err, "could not get best light client update for period %d", period)
}
parentItem = &cacheItem{
period: period,
bestUpdate: bestUpdateSoFar,
bestFinalityUpdate: s.lastFinalityUpdate,
}
}

// if at a period boundary, no need to compare data, just save new ones
if parentItem.period != period {
newCacheItem.bestUpdate = update
newCacheItem.bestFinalityUpdate = finalityUpdate
s.cache.items[blockRoot] = newCacheItem

s.setLastOptimisticUpdate(optimisticUpdate, true)

// if the new block is not head, we don't want to change our lastFinalityUpdate
if newBlockIsHead {
s.setLastFinalityUpdate(finalityUpdate, true)
}

return nil
}

// if in the same period, compare updates
isUpdateBetter, err := IsBetterUpdate(update, parentItem.bestUpdate)
if err != nil {
return errors.Wrapf(err, "could not compare light client updates")
}
if isUpdateBetter {
newCacheItem.bestUpdate = update
} else {
newCacheItem.bestUpdate = parentItem.bestUpdate
}

isBetterFinalityUpdate := IsBetterFinalityUpdate(finalityUpdate, parentItem.bestFinalityUpdate)
if isBetterFinalityUpdate {
newCacheItem.bestFinalityUpdate = finalityUpdate
} else {
newCacheItem.bestFinalityUpdate = parentItem.bestFinalityUpdate
}

// save new item in cache
s.cache.items[blockRoot] = newCacheItem

// save lastOptimisticUpdate if better
if isBetterOptimisticUpdate := IsBetterOptimisticUpdate(optimisticUpdate, s.lastOptimisticUpdate); isBetterOptimisticUpdate {
s.setLastOptimisticUpdate(optimisticUpdate, true)
}

// if the new block is considered the head, set the last finality update
if newBlockIsHead {
s.setLastFinalityUpdate(newCacheItem.bestFinalityUpdate, isBetterFinalityUpdate)
}

return nil
}

func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
s.mu.RLock()
defer s.mu.RUnlock()

// Fetch the light client bootstrap from the database
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
if err != nil {
return nil, err
}
if bootstrap == nil { // not found
return nil, ErrLightClientBootstrapNotFound
}

return bootstrap, nil
}

func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, state state.BeaconState) error {
s.mu.Lock()
defer s.mu.Unlock()

blk, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
}
if blk == nil {
return errors.Errorf("nil block for root %x", blockRoot)
}

bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
if err != nil {
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
}

// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
}
return nil
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()

// Fetch the light client updates map from the database
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
if err != nil {
return nil, errors.Wrapf(err, "failed to get updates from the database")
}

cacheUpdatesByPeriod, err := s.getCacheUpdatesByPeriod(headBlock)
if err != nil {
return nil, errors.Wrapf(err, "failed to get updates from cache")
}

for period, update := range cacheUpdatesByPeriod {
updatesMap[period] = update
}

var updates []interfaces.LightClientUpdate

for i := startPeriod; i <= endPeriod; i++ {
update, ok := updatesMap[i]
if !ok {
// Only return the first contiguous range of updates
break
}
updates = append(updates, update)
}

return updates, nil
}
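Because the loop stops at the first missing period, callers receive only the first contiguous run of updates; a sketch with hypothetical period contents:

updatesMap := map[uint64]string{10: "u10", 11: "u11", 13: "u13"} // merged DB + cache results
var updates []string
for i := uint64(10); i <= 13; i++ {
	u, ok := updatesMap[i]
	if !ok {
		break // period 12 is missing, so u13 is dropped
	}
	updates = append(updates, u)
}
_ = updates // ["u10", "u11"]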

func (s *Store) LightClientUpdate(ctx context.Context, period uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error) {
// we don't need to lock here because the LightClientUpdates method locks the store
updates, err := s.LightClientUpdates(ctx, period, period, headBlock)
if err != nil {
return nil, errors.Wrapf(err, "failed to get light client update for period %d", period)
}
if len(updates) == 0 {
return nil, nil
}
return updates[0], nil
}

func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaconBlock) (map[uint64]interfaces.LightClientUpdate, error) {
updatesByPeriod := make(map[uint64]interfaces.LightClientUpdate)

cacheHeadRoot := headBlock.Block().ParentRoot()

cacheHeadItem, ok := s.cache.items[cacheHeadRoot]
if !ok {
log.Debugf("Head root %x not found in light client cache. Returning empty updates map for non-finality cache.", cacheHeadRoot)
return updatesByPeriod, nil
}

for cacheHeadItem != nil {
if _, exists := updatesByPeriod[cacheHeadItem.period]; !exists {
updatesByPeriod[cacheHeadItem.period] = cacheHeadItem.bestUpdate
}
cacheHeadItem = cacheHeadItem.parent
}

return updatesByPeriod, nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

s.setLastFinalityUpdate(update, broadcast)
}

func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}

s.lastFinalityUpdate = update
log.Debug("Saved new light client finality update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})
}

func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

s.setLastOptimisticUpdate(update, broadcast)
}

func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}

s.lastOptimisticUpdate = update
log.Debug("Saved new light client optimistic update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastOptimisticUpdate
}

func (s *Store) MigrateToCold(ctx context.Context, finalizedRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()

// If the cache is empty (some problem in processing data), we can skip migration.
// This is a safety check and should not happen in normal operation.
if len(s.cache.items) == 0 {
log.Debug("Non-finality cache is empty. Skipping migration.")
return nil
}

blk, err := s.beaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for finalized root %x", finalizedRoot)
}
if blk == nil {
return errors.Errorf("nil block for finalized root %x", finalizedRoot)
}
finalizedSlot := blk.Block().Slot()
finalizedCacheHeadRoot := blk.Block().ParentRoot()

var finalizedCacheHead *cacheItem
var ok bool

finalizedCacheHead, ok = s.cache.items[finalizedCacheHeadRoot]
if !ok {
log.Debugf("Finalized block's parent root %x not found in light client cache. Cleaning the broken part of the cache.", finalizedCacheHeadRoot)

// delete non-finality cache items older than finalized slot
s.cleanCache(finalizedSlot)

return nil
}

updateByPeriod := make(map[uint64]interfaces.LightClientUpdate)
// Traverse the cache from the head item to the tail, collecting updates
for item := finalizedCacheHead; item != nil; item = item.parent {
if _, seen := updateByPeriod[item.period]; seen {
// We already have an update for this period, skip this item
continue
}
updateByPeriod[item.period] = item.bestUpdate
}

// save updates to db
for period, update := range updateByPeriod {
err = s.beaconDB.SaveLightClientUpdate(ctx, period, update)
if err != nil {
log.WithError(err).Errorf("failed to save light client update for period %d. Skipping this period.", period)
}
}

// delete non-finality cache items older than finalized slot
s.cleanCache(finalizedSlot)

return nil
}

func (s *Store) cleanCache(finalizedSlot primitives.Slot) {
// delete non-finality cache items older than finalized slot
for k, v := range s.cache.items {
if v.slot < finalizedSlot {
delete(s.cache.items, k)
}
if v.parent != nil && v.parent.slot < finalizedSlot {
v.parent = nil // remove parent reference
}
}
}
959
beacon-chain/light-client/store_test.go
Normal file
@@ -0,0 +1,959 @@
package light_client

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
)

func TestLightClientStore(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
params.OverrideBeaconConfig(cfg)

// Initialize the light client store
lcStore := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))

// Create test light client updates for Capella and Deneb
lCapella := util.NewTestLightClient(t, version.Capella)
opUpdateCapella, err := NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
finUpdateCapella, err := NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")

lDeneb := util.NewTestLightClient(t, version.Deneb)
opUpdateDeneb, err := NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
finUpdateDeneb, err := NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")

// Initially the store should have nil values for both updates
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get finality with Capella update. Optimistic update should be nil
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get optimistic with Capella update. Finality update should be Capella
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")

// Set and get finality and optimistic with Deneb update
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
}

func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))

// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
// update0 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
update1, err := NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
// update1 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
update2, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
// update2 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority(0))
update3, err := NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
// update3 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")

// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0))
update4, err := NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
// update4 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
update5, err := NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
// update5 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
update6, err := NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
// update6 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}

func TestLightClientStore_SaveLCData(t *testing.T) {
t.Run("no parent in cache or db - new is head", func(t *testing.T) {
db := testDB.SetupDB(t)
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
require.NotNil(t, s)

l := util.NewTestLightClient(t, version.Altair)

blkRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)

require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")

update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
attstedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)

require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
})

t.Run("no parent in cache or db - new not head", func(t *testing.T) {
db := testDB.SetupDB(t)
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
require.NotNil(t, s)

l := util.NewTestLightClient(t, version.Altair)

blkRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
require.NoError(t, err)

require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")

update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
attstedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)

require.IsNil(t, s.lastFinalityUpdate, "Expected to not find the last finality update in the store since the block is not head")
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
})
t.Run("parent in db", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
// save an update for this period in db
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveLightClientUpdate(l.Ctx, period, update), "Failed to save light client update in db")
|
||||
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better
|
||||
|
||||
blkRoot, err := l2.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})
|
||||
|
||||
t.Run("parent in cache", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better
|
||||
|
||||
// save the cache item for this period in cache
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
item := &cacheItem{
|
||||
period: period,
|
||||
bestUpdate: update,
|
||||
bestFinalityUpdate: finalityUpdate,
|
||||
}
|
||||
attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
|
||||
s.cache.items[attestedBlockRoot] = item
|
||||
|
||||
blkRoot, err := l2.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})

t.Run("parent in the previous period", func(t *testing.T) {
db := testDB.SetupDB(t)
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
require.NotNil(t, s)

l := util.NewTestLightClient(t, version.Altair)
l2 := util.NewTestLightClient(t, version.Bellatrix, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better

// save the cache item for period1 in cache
period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
item := &cacheItem{
period: period1,
bestUpdate: update,
bestFinalityUpdate: finalityUpdate,
}
attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
s.cache.items[attestedBlockRoot] = item

blkRoot, err := l2.Block.Block().HashTreeRoot()
require.NoError(t, err)

require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")

update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
require.NoError(t, err)
finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
require.NoError(t, err)
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
require.NoError(t, err)
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)

require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
})
}
|
||||
|
||||
func TestLightClientStore_MigrateToCold(t *testing.T) {
	// This tests the scenario where the chain advances but the cache is empty.
	// It should see that there is nothing in the cache to migrate and just update the tail to the new finalized root.
	t.Run("empty cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		for i := 0; i < 3; i++ {
			newBlock := util.NewBeaconBlock()
			newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
			newBlock.Block.ParentRoot = finalizedBlockRoot[:]
			signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
			require.NoError(t, err)
			blockRoot, err := signedNewBlock.Block().HashTreeRoot()
			require.NoError(t, err)
			require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
			finalizedBlockRoot = blockRoot
		}

		err := s.MigrateToCold(ctx, finalizedBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 0, len(s.cache.items))
	})

	// This tests the scenario where the chain advances but the CANONICAL cache is empty.
	// It should see that there is nothing in the canonical cache to migrate, update the tail to the new finalized root, AND delete anything non-canonical.
	t.Run("non canonical cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		for i := 0; i < 3; i++ {
			newBlock := util.NewBeaconBlock()
			newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
			newBlock.Block.ParentRoot = finalizedBlockRoot[:]
			signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
			require.NoError(t, err)
			blockRoot, err := signedNewBlock.Block().HashTreeRoot()
			require.NoError(t, err)
			require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
			finalizedBlockRoot = blockRoot
		}

		// Add a non-canonical item to the cache.
		cacheItem := &cacheItem{
			period: 0,
			slot:   33,
		}
		nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
		s.cache.items[nonCanonicalBlockRoot] = cacheItem

		require.Equal(t, 1, len(s.cache.items))

		err := s.MigrateToCold(ctx, finalizedBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
	})

	// The db has an update, and the cache has both canonical and non-canonical items.
	// It should update the update in the db and clear the cache.
	t.Run("mixed cache - finality immediately after cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
		require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		// Save an update for this period in the db.
		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)
		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
		require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))

		lastBlockRoot := finalizedBlockRoot
		lastAttestedRoot := finalizedBlockRoot
		lastUpdate := update
		for i := 1; i < 4; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			lastBlockRoot, err = l.Block.Block().HashTreeRoot()
			require.NoError(t, err)
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
			update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
			require.NoError(t, err)
			lastUpdate = update
		}

		require.Equal(t, 3, len(s.cache.items))

		// Add a non-canonical item to the cache.
		cacheItem := &cacheItem{
			period: 0,
			slot:   33,
		}
		nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
		s.cache.items[nonCanonicalBlockRoot] = cacheItem

		require.Equal(t, 4, len(s.cache.items))

		err = s.MigrateToCold(ctx, lastBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
		u, err := beaconDB.LightClientUpdate(ctx, period)
		require.NoError(t, err)
		require.NotNil(t, u)
		require.DeepEqual(t, lastUpdate, u)
	})

	// The db has an update, and the cache has both canonical and non-canonical items, with the finalized height in the middle of the cache.
	// It should update the update in the db and delete the cache items before the finalized slot.
	t.Run("mixed cache - finality middle of cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
		require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		// Save an update for this period in the db.
		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)
		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
		require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))

		lastBlockRoot := finalizedBlockRoot
		lastUpdate := update
		lastAttestedRoot := [32]byte{}
		for i := 1; i < 4; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			root, err := l.Block.Block().HashTreeRoot()
			require.NoError(t, err)
			lastBlockRoot = root
			update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
			require.NoError(t, err)
			lastUpdate = update
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
		}

		require.Equal(t, 3, len(s.cache.items))

		// Add a non-canonical item to the cache.
		cacheItem := &cacheItem{
			period: 0,
			slot:   33,
		}
		nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
		s.cache.items[nonCanonicalBlockRoot] = cacheItem

		require.Equal(t, 4, len(s.cache.items))

		for i := 4; i < 7; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
		}

		require.Equal(t, 7, len(s.cache.items))

		err = s.MigrateToCold(ctx, lastBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
		u, err := beaconDB.LightClientUpdate(ctx, period)
		require.NoError(t, err)
		require.NotNil(t, u)
		require.DeepEqual(t, lastUpdate, u)
	})

	// We have multiple periods in the cache before finalization happens; we expect all of them to be saved in the db.
	t.Run("finality after multiple periods in cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		cfg := params.BeaconConfig().Copy()
		cfg.EpochsPerSyncCommitteePeriod = 1
		params.OverrideBeaconConfig(cfg)

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
		require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		// Save an update for period1 in the db.
		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)
		period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
		require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period1, update))

		lastBlockRoot := finalizedBlockRoot
		lastUpdatePeriod1 := update
		lastAttestedRoot := [32]byte{}
		for i := 1; i < 4; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			root, err := l.Block.Block().HashTreeRoot()
			require.NoError(t, err)
			lastBlockRoot = root
			lastUpdatePeriod1, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
			require.NoError(t, err)
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
		}

		period2 := period1
		var lastUpdatePeriod2 interfaces.LightClientUpdate
		for i := 1; i < 4; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			root, err := l.Block.Block().HashTreeRoot()
			require.NoError(t, err)
			lastBlockRoot = root
			lastUpdatePeriod2, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
			require.NoError(t, err)
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
			period2 = slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
		}

		require.Equal(t, 6, len(s.cache.items))

		// Add a non-canonical item to the cache.
		cacheItem := &cacheItem{
			period: 0,
			slot:   33,
		}
		nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
		s.cache.items[nonCanonicalBlockRoot] = cacheItem

		require.Equal(t, 7, len(s.cache.items))

		for i := 4; i < 7; i++ {
			l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
			require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
			require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
			lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
			require.NoError(t, err)
		}

		require.Equal(t, 10, len(s.cache.items))

		err = s.MigrateToCold(ctx, lastBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
		u, err := beaconDB.LightClientUpdate(ctx, period2)
		require.NoError(t, err)
		require.NotNil(t, u)
		require.DeepEqual(t, lastUpdatePeriod2, u)
		u, err = beaconDB.LightClientUpdate(ctx, period1)
		require.NoError(t, err)
		require.NotNil(t, u)
		require.DeepEqual(t, lastUpdatePeriod1, u)
	})
}
func saveInitialFinalizedCheckpointData(t *testing.T, ctx context.Context, beaconDB db.Database) ([32]byte, interfaces.SignedBeaconBlock) {
	genesis := util.NewBeaconBlock()
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
	util.SaveBlock(t, ctx, beaconDB, genesis)
	genesisState, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveState(ctx, genesisState, genesisRoot))

	finalizedState, err := util.NewBeaconState()
	require.NoError(t, err)
	finalizedBlock := util.NewBeaconBlock()
	finalizedBlock.Block.Slot = 32
	finalizedBlock.Block.ParentRoot = genesisRoot[:]
	signedFinalizedBlock, err := blocks.NewSignedBeaconBlock(finalizedBlock)
	require.NoError(t, err)
	finalizedBlockHeader, err := signedFinalizedBlock.Header()
	require.NoError(t, err)
	require.NoError(t, finalizedState.SetLatestBlockHeader(finalizedBlockHeader.Header))
	finalizedStateRoot, err := finalizedState.HashTreeRoot(ctx)
	require.NoError(t, err)
	// Re-create the signed block with the computed state root so that the block
	// root below commits to the finalized state.
	finalizedBlock.Block.StateRoot = finalizedStateRoot[:]
	signedFinalizedBlock, err = blocks.NewSignedBeaconBlock(finalizedBlock)
	require.NoError(t, err)
	finalizedBlockRoot, err := signedFinalizedBlock.Block().HashTreeRoot()
	require.NoError(t, err)
	cp := ethpb.Checkpoint{
		Epoch: 1,
		Root:  finalizedBlockRoot[:],
	}
	require.NoError(t, beaconDB.SaveBlock(ctx, signedFinalizedBlock))
	require.NoError(t, beaconDB.SaveState(ctx, finalizedState, finalizedBlockRoot))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &cp))

	return finalizedBlockRoot, signedFinalizedBlock
}
func TestLightClientStore_LightClientUpdatesByRange(t *testing.T) {
	t.Run("no updates", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		updates, err := s.LightClientUpdates(ctx, 2, 5, finalizedBlock)
		require.NoError(t, err)
		require.Equal(t, 0, len(updates))
	})

	t.Run("single update from db", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)

		require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))

		updates, err := s.LightClientUpdates(ctx, 3, 3, finalizedBlock)
		require.NoError(t, err)
		require.Equal(t, 1, len(updates))
		require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
	})

	t.Run("multiple updates from db", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)

		require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 5, update))

		updates, err := s.LightClientUpdates(ctx, 3, 5, finalizedBlock)
		require.NoError(t, err)
		require.Equal(t, 3, len(updates))
		require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
		require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
		require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
	})

	t.Run("single update from cache", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)

		cacheItem := &cacheItem{
			period:     3,
			bestUpdate: update,
		}
		s.cache.items[[32]byte{3}] = cacheItem

		_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{3})

		updates, err := s.LightClientUpdates(ctx, 3, 3, headBlock)
		require.NoError(t, err)
		require.Equal(t, 1, len(updates))
		require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
	})

	t.Run("multiple updates from cache", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)

		cacheItemP3 := &cacheItem{
			period:     3,
			bestUpdate: update,
		}
		s.cache.items[[32]byte{3}] = cacheItemP3

		cacheItemP4 := &cacheItem{
			period:     4,
			bestUpdate: update,
			parent:     cacheItemP3,
		}
		s.cache.items[[32]byte{4}] = cacheItemP4

		cacheItemP5 := &cacheItem{
			period:     5,
			bestUpdate: update,
			parent:     cacheItemP4,
		}
		s.cache.items[[32]byte{5}] = cacheItemP5

		_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})

		updates, err := s.LightClientUpdates(ctx, 3, 5, headBlock)
		require.NoError(t, err)
		require.Equal(t, 3, len(updates))
		require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
		require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
		require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
	})

	t.Run("multiple updates from both db and cache - no overlap", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)

		require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))

		cacheItemP3 := &cacheItem{
			period:     3,
			bestUpdate: update,
		}
		s.cache.items[[32]byte{3}] = cacheItemP3

		cacheItemP4 := &cacheItem{
			period:     4,
			bestUpdate: update,
			parent:     cacheItemP3,
		}
		s.cache.items[[32]byte{4}] = cacheItemP4

		cacheItemP5 := &cacheItem{
			period:     5,
			bestUpdate: update,
			parent:     cacheItemP4,
		}
		s.cache.items[[32]byte{5}] = cacheItemP5

		_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})

		updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
		require.NoError(t, err)
		require.Equal(t, 5, len(updates))
		for i := 0; i < 5; i++ {
			require.DeepEqual(t, update, updates[i], "Expected to find the update in the store")
		}
	})

	t.Run("multiple updates from both db and cache - overlap", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l1 := util.NewTestLightClient(t, version.Altair)
		update1, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
		require.NoError(t, err)

		l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
		update2, err := NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
		require.NoError(t, err)

		require.DeepNotEqual(t, update1, update2)

		require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update1))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update1))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update1))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update1))

		cacheItemP3 := &cacheItem{
			period:     3,
			bestUpdate: update2,
		}
		s.cache.items[[32]byte{3}] = cacheItemP3

		cacheItemP4 := &cacheItem{
			period:     4,
			bestUpdate: update2,
			parent:     cacheItemP3,
		}
		s.cache.items[[32]byte{4}] = cacheItemP4

		cacheItemP5 := &cacheItem{
			period:     5,
			bestUpdate: update2,
			parent:     cacheItemP4,
		}
		s.cache.items[[32]byte{5}] = cacheItemP5

		_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})

		updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
		require.NoError(t, err)
		require.Equal(t, 5, len(updates))
		// The first two updates should be update1.
		for i := 0; i < 2; i++ {
			require.DeepEqual(t, update1, updates[i], "Expected to find the update in the store")
		}
		// The next three updates should be update2, as the cache overrides the db.
		for i := 2; i < 5; i++ {
			require.DeepEqual(t, update2, updates[i], "Expected to find the update in the store")
		}
	})

	t.Run("first continuous range", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		l1 := util.NewTestLightClient(t, version.Altair)
		update, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
		require.NoError(t, err)

		require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
		require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))

		cacheItemP4 := &cacheItem{
			period:     4,
			bestUpdate: update,
		}
		s.cache.items[[32]byte{4}] = cacheItemP4

		cacheItemP5 := &cacheItem{
			period:     5,
			bestUpdate: update,
			parent:     cacheItemP4,
		}
		s.cache.items[[32]byte{5}] = cacheItemP5

		_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})

		updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
		require.NoError(t, err)
		require.Equal(t, 2, len(updates))
		require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
		require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
	})
}
func saveStateAndBlockWithParentRoot(t *testing.T, ctx context.Context, d db.Database, parentRoot [32]byte) ([32]byte, interfaces.SignedBeaconBlock) {
	blk := util.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]

	blkRoot, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)

	util.SaveBlock(t, ctx, d, blk)

	st, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, d.SaveState(ctx, st, blkRoot))

	signedBlock, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	return blkRoot, signedBlock
}
@@ -23,7 +23,6 @@ go_library(
         "//beacon-chain/builder:go_default_library",
         "//beacon-chain/cache:go_default_library",
         "//beacon-chain/cache/depositsnapshot:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/db/kv:go_default_library",
@@ -32,6 +31,7 @@ go_library(
         "//beacon-chain/execution:go_default_library",
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/monitor:go_default_library",
         "//beacon-chain/node/registration:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",

@@ -25,7 +25,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
@@ -34,6 +33,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
+	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/monitor"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/node/registration"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"

@@ -253,10 +253,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
 	// their initialization.
 	beacon.finalizedStateAtStartUp = nil
 
-	if features.Get().EnableLightClient {
-		beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
-	}
-
 	return beacon, nil
 }

@@ -349,6 +345,11 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
 		return errors.Wrap(err, "could not register P2P service")
 	}
 
+	if features.Get().EnableLightClient {
+		log.Debugln("Registering Light Client Store")
+		beacon.registerLightClientStore()
+	}
+
 	log.Debugln("Registering Backfill Service")
 	if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
 		return errors.Wrap(err, "could not register Back Fill service")

@@ -1139,6 +1140,11 @@ func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.
 	return b.services.RegisterService(bf)
 }
 
+func (b *BeaconNode) registerLightClientStore() {
+	lcs := lightclient.NewLightClientStore(b.fetchP2P(), b.StateFeed(), b.db)
+	b.lcStore = lcs
+}
+
 func hasNetworkFlag(cliCtx *cli.Context) bool {
 	for _, flag := range features.NetworkFlags {
 		for _, name := range flag.Names() {

@@ -74,7 +74,9 @@ func TestNodeStart_Ok(t *testing.T) {
 	set := flag.NewFlagSet("test", 0)
 	set.String("datadir", tmp, "node data directory")
 	set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
+	set.Bool("enable-light-client", true, "enable light client")
 	require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
+	require.NoError(t, set.Set("enable-light-client", "true"))
 
 	ctx, cancel := newCliContextWithCancel(&app, set)

@@ -88,6 +90,7
 
 	node, err := New(ctx, cancel, options...)
 	require.NoError(t, err)
+	require.NotNil(t, node.lcStore)
 	node.services = &runtime.ServiceRegistry{}
 	go func() {
 		node.Start()
@@ -60,6 +60,7 @@ go_library(
         "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
+        "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//consensus-types/wrapper:go_default_library",

@@ -11,6 +11,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/config/params"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/crypto/hash"
 	"github.com/OffchainLabs/prysm/v6/monitoring/tracing"

@@ -308,19 +309,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
 // BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
 // broadcasted to the current fork and to the input column subnet.
 func (s *Service) BroadcastDataColumnSidecar(
-	root [fieldparams.RootLength]byte,
 	dataColumnSubnet uint64,
-	dataColumnSidecar *ethpb.DataColumnSidecar,
+	dataColumnSidecar blocks.VerifiedRODataColumn,
 ) error {
 	// Add tracing to the function.
 	ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
 	defer span.End()
 
-	// Ensure the data column sidecar is not nil.
-	if dataColumnSidecar == nil {
-		return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", dataColumnSubnet)
-	}
-
 	// Retrieve the current fork digest.
 	forkDigest, err := s.currentForkDigest()
 	if err != nil {

@@ -330,16 +325,15 @@ func (s *Service) BroadcastDataColumnSidecar(
 	}
 
 	// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
-	go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
+	go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
 
 	return nil
 }
 
 func (s *Service) internalBroadcastDataColumnSidecar(
 	ctx context.Context,
-	root [fieldparams.RootLength]byte,
 	columnSubnet uint64,
-	dataColumnSidecar *ethpb.DataColumnSidecar,
+	dataColumnSidecar blocks.VerifiedRODataColumn,
 	forkDigest [fieldparams.VersionLength]byte,
 ) {
 	// Add tracing to the function.

@@ -385,8 +379,9 @@ func (s *Service) internalBroadcastDataColumnSidecar(
 	log.WithFields(logrus.Fields{
 		"slot":               slot,
 		"timeSinceSlotStart": time.Since(slotStartTime),
-		"root":               fmt.Sprintf("%#x", root),
+		"root":               fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
 		"columnSubnet":       columnSubnet,
+		"blobCount":          len(dataColumnSidecar.Column),
 	}).Debug("Broadcasted data column sidecar")
 
 	// Increase the number of successful broadcasts.
@@ -711,13 +711,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
 	subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
 	topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
 
-	roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
-	sidecar := roSidecars[0].DataColumnSidecar
-
-	// Attempt to broadcast nil object should fail.
-	var emptyRoot [fieldparams.RootLength]byte
-	err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
-	require.ErrorContains(t, "attempted to broadcast nil", err)
+	_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
+	verifiedRoSidecar := verifiedRoSidecars[0]
 
 	// Subscribe to the topic.
 	sub, err := p2.SubscribeToTopic(topic)

@@ -727,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
 	time.Sleep(50 * time.Millisecond)
 
 	// Broadcast to peers and wait.
-	err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
+	err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
 	require.NoError(t, err)
 
 	// Receive the message.

@@ -739,5 +734,5 @@ func TestService_BroadcastDataColumn(t *testing.T) {
 
 	var result ethpb.DataColumnSidecar
 	require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
-	require.DeepEqual(t, &result, sidecar)
+	require.DeepEqual(t, &result, verifiedRoSidecar)
 }

@@ -10,6 +10,8 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+var errNoCustodyInfo = errors.New("no custody info available")
+
 var _ CustodyManager = (*Service)(nil)
 
 // EarliestAvailableSlot returns the earliest available slot.

@@ -30,7 +32,7 @@ func (s *Service) CustodyGroupCount() (uint64, error) {
 	defer s.custodyInfoLock.Unlock()
 
 	if s.custodyInfo == nil {
-		return 0, errors.New("no custody info available")
+		return 0, errNoCustodyInfo
 	}
 
 	return s.custodyInfo.groupCount, nil

@@ -79,7 +79,7 @@ func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
 func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
 	rawListener, err := listenerCreator()
 	if err != nil {
-		return nil, errors.Wrap(err, "could not create new listener")
+		return nil, errors.Wrap(err, "create new listener")
 	}
 	return &listenerWrapper{
 		listener: rawListener,

@@ -536,7 +536,7 @@ func (s *Service) createListener(
 		int(s.cfg.QUICPort),
 	)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not create local node")
+		return nil, errors.Wrap(err, "create local node")
 	}
 
 	bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
@@ -604,13 +604,27 @@ func (s *Service) createLocalNode(
 	localNode = initializeSyncCommSubnets(localNode)
 
 	if params.FuluEnabled() {
-		custodyGroupCount, err := s.CustodyGroupCount()
-		if err != nil {
-			return nil, errors.Wrap(err, "could not retrieve custody group count")
-		}
+		// TODO: Replace this quick fix with a proper synchronization scheme (chan?)
+		const delay = 1 * time.Second
 
-		custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
-		localNode.Set(custodyGroupCountEntry)
+		var custodyGroupCount uint64
+
+		err := errNoCustodyInfo
+		for errors.Is(err, errNoCustodyInfo) {
+			custodyGroupCount, err = s.CustodyGroupCount()
+			if errors.Is(err, errNoCustodyInfo) {
+				log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
+				time.Sleep(delay)
+				continue
+			}
+
+			if err != nil {
+				return nil, errors.Wrap(err, "retrieve custody group count")
+			}
+
+			custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
+			localNode.Set(custodyGroupCountEntry)
+		}
 	}
 
 	if s.cfg != nil && s.cfg.HostAddress != "" {
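// Sketch (not part of the diff): the TODO above suggests replacing the polling
// loop with channel-based synchronization. A minimal, hypothetical shape,
// assuming the writer of s.custodyInfo closes a `custodyInfoReady` channel
// (a field that does not exist in the current code) once the info is set:
//
//	select {
//	case <-s.custodyInfoReady: // hypothetical: closed once s.custodyInfo is populated
//		custodyGroupCount, err := s.CustodyGroupCount()
//		if err != nil {
//			return nil, errors.Wrap(err, "retrieve custody group count")
//		}
//		localNode.Set(peerdas.Cgc(custodyGroupCount))
//	case <-time.After(5 * time.Minute): // hypothetical bound so startup cannot hang forever
//		return nil, errors.New("timed out waiting for custody info")
//	}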
@@ -652,7 +666,7 @@ func (s *Service) startDiscoveryV5(
 	}
 	wrappedListener, err := newListener(createListener)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not create listener")
+		return nil, errors.Wrap(err, "create listener")
 	}
 	record := wrappedListener.Self()

@@ -6,6 +6,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

@@ -51,7 +52,7 @@ type (
 		BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
 		BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
 		BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
-		BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
+		BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
 	}
 
 // SetStreamHandler configures p2p to handle streams of a certain topic ID.

@@ -25,6 +25,7 @@ go_library(
         "//beacon-chain/p2p/peers/scorers:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
+        "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -6,6 +6,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

@@ -168,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
 }
 
 // BroadcastDataColumnSidecar -- fake.
-func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
+func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
 	return nil
 }

@@ -5,7 +5,7 @@ import (
 	"sync"
 	"sync/atomic"
 
-	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"google.golang.org/protobuf/proto"

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 }
 
 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
+func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
 	m.BroadcastCalled.Store(true)
 	return nil
 }

@@ -17,6 +17,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/config/params"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

@@ -232,7 +233,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
 }
 
 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
+func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
 	p.BroadcastCalled.Store(true)
 	return nil
 }
@@ -498,8 +499,39 @@ func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
 	return s.earliestAvailableSlot, s.custodyGroupCount, nil
 }
 
-// CustodyGroupCountFromPeer .
+// CustodyGroupCountFromPeer retrieves the custody group count from a peer.
+// It first tries to get the custody group count from the peer's metadata,
+// then falls back to the ENR value if the metadata is not available, then
+// falls back to the minimum number of custody groups an honest node should custody
+// and serve samples from if the ENR is not available.
 func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
+	// Try to get the custody group count from the peer's metadata.
+	metadata, err := s.peers.Metadata(pid)
+	if err != nil {
+		// On error, default to the ENR value.
+		return s.custodyGroupCountFromPeerENR(pid)
+	}
+
+	// If the metadata is nil, default to the ENR value.
+	if metadata == nil {
+		return s.custodyGroupCountFromPeerENR(pid)
+	}
+
+	// Get the custody group count from the metadata.
+	custodyCount := metadata.CustodyGroupCount()
+
+	// If the custody count is zero, default to the ENR value.
+	if custodyCount == 0 {
+		return s.custodyGroupCountFromPeerENR(pid)
+	}
+
+	return custodyCount
+}
+
+// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
+// If the ENR is not available, it defaults to the minimum number of custody groups
+// an honest node custodies and serves samples from.
+func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
 	// By default, we assume the peer custodies the minimum number of groups.
 	custodyRequirement := params.BeaconConfig().CustodyRequirement
 

@@ -509,7 +541,7 @@ func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
 		return custodyRequirement
 	}
 
-	// Retrieve the custody subnets count from the ENR.
+	// Retrieve the custody group count from the ENR.
 	custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
 	if err != nil {
 		return custodyRequirement
@@ -20,10 +20,10 @@ go_library(
         "//beacon-chain/core/feed/block:go_default_library",
         "//beacon-chain/core/feed/operation:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/execution:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
         "//beacon-chain/operations/slashings:go_default_library",

@@ -102,11 +102,11 @@ func (s *Service) endpoints(
 	endpoints = append(endpoints, s.prysmValidatorEndpoints(stater, coreService)...)
 
 	if features.Get().EnableLightClient {
-		endpoints = append(endpoints, s.lightClientEndpoints(blocker, stater)...)
+		endpoints = append(endpoints, s.lightClientEndpoints()...)
 	}
 
 	if enableDebug {
-		endpoints = append(endpoints, s.debugEndpoints(stater)...)
+		endpoints = append(endpoints, s.debugEndpoints(stater, blocker)...)
 	}
 
 	return endpoints

@@ -194,6 +194,8 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
 	const namespace = "blob"
 	return []endpoint{
 		{
+			// Deprecated: /eth/v1/beacon/blob_sidecars/{block_id} is deprecated in favor of /eth/v1/beacon/blobs/{block_id};
+			// the endpoint will continue to work for some time post-Fulu, however.
 			template: "/eth/v1/beacon/blob_sidecars/{block_id}",
 			name:     namespace + ".Blobs",
 			middleware: []middleware.Middleware{

@@ -203,6 +205,16 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
 			handler: server.Blobs,
 			methods: []string{http.MethodGet},
 		},
+		{
+			template: "/eth/v1/beacon/blobs/{block_id}",
+			name:     namespace + ".GetBlobs",
+			middleware: []middleware.Middleware{
+				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
+				middleware.AcceptEncodingHeaderHandler(),
+			},
+			handler: server.GetBlobs,
+			methods: []string{http.MethodGet},
+		},
 	}
 }

@@ -1034,9 +1046,10 @@ func (*Service) configEndpoints() []endpoint {
 	}
 }
 
-func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Stater) []endpoint {
+func (s *Service) lightClientEndpoints() []endpoint {
 	server := &lightclient.Server{
-		LCStore: s.cfg.LCStore,
+		LCStore:     s.cfg.LCStore,
+		HeadFetcher: s.cfg.HeadFetcher,
 	}
 
 	const namespace = "lightclient"

@@ -1084,7 +1097,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
 	}
 }
 
-func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
+func (s *Service) debugEndpoints(stater lookup.Stater, blocker lookup.Blocker) []endpoint {
 	server := &debug.Server{
 		BeaconDB:    s.cfg.BeaconDB,
 		HeadFetcher: s.cfg.HeadFetcher,

@@ -1094,6 +1107,8 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
 		ForkchoiceFetcher:   s.cfg.ForkchoiceFetcher,
 		FinalizationFetcher: s.cfg.FinalizationFetcher,
 		ChainInfoFetcher:    s.cfg.ChainInfoFetcher,
+		GenesisTimeFetcher:  s.cfg.GenesisTimeFetcher,
+		Blocker:             blocker,
 	}
 
 	const namespace = "debug"

@@ -1128,6 +1143,16 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
 			handler: server.GetForkChoice,
 			methods: []string{http.MethodGet},
 		},
+		{
+			template: "/eth/v1/debug/beacon/data_column_sidecars/{block_id}",
+			name:     namespace + ".GetDataColumnSidecars",
+			middleware: []middleware.Middleware{
+				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
+				middleware.AcceptEncodingHeaderHandler(),
+			},
+			handler: server.DataColumnSidecars,
+			methods: []string{http.MethodGet},
+		},
 	}
 }

@@ -70,6 +70,7 @@ func Test_endpoints(t *testing.T) {
 	blobRoutes := map[string][]string{
 		"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
+		"/eth/v1/beacon/blobs/{block_id}":         {http.MethodGet},
 	}
 
 	configRoutes := map[string][]string{

@@ -79,9 +80,10 @@ func Test_endpoints(t *testing.T) {
 	}
 
 	debugRoutes := map[string][]string{
-		"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
-		"/eth/v2/debug/beacon/heads":             {http.MethodGet},
-		"/eth/v1/debug/fork_choice":              {http.MethodGet},
+		"/eth/v2/debug/beacon/states/{state_id}":               {http.MethodGet},
+		"/eth/v2/debug/beacon/heads":                           {http.MethodGet},
+		"/eth/v1/debug/fork_choice":                            {http.MethodGet},
+		"/eth/v1/debug/beacon/data_column_sidecars/{block_id}": {http.MethodGet},
 	}
 
 	eventsRoutes := map[string][]string{

@@ -1465,8 +1465,7 @@ func (s *Server) GetBlockHeader(w http.ResponseWriter, r *http.Request) {
 	}
 
 	blk, err := s.Blocker.Block(ctx, []byte(blockID))
-	ok := shared.WriteBlockFetchError(w, blk, err)
-	if !ok {
+	if !shared.WriteBlockFetchError(w, blk, err) {
 		return
 	}
 	blockHeader, err := blk.Header()
@@ -109,10 +109,10 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
 	// future epochs and epochs too far back are not supported.
 	randaoEpochLowerBound := uint64(0)
 	// Lower bound should not underflow.
-	if uint64(stEpoch) > uint64(st.RandaoMixesLength()) {
-		randaoEpochLowerBound = uint64(stEpoch) - uint64(st.RandaoMixesLength())
+	if uint64(stEpoch) >= uint64(st.RandaoMixesLength()) {
+		randaoEpochLowerBound = uint64(stEpoch) - uint64(st.RandaoMixesLength()) + 1
 	}
-	if epoch > stEpoch || uint64(epoch) < randaoEpochLowerBound+1 {
+	if epoch > stEpoch || (uint64(epoch) < randaoEpochLowerBound) {
 		httputil.HandleError(w, "Epoch is out of range for the randao mixes of the state", http.StatusBadRequest)
 		return
 	}
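// Worked example (not part of the diff): a state at epoch E retains RANDAO
// mixes for epochs in [E-L+1, E], where L = RandaoMixesLength()
// (EpochsPerHistoricalVector, 65536 on mainnet).
//   - E = L-1 = 65535: the new bound stays 0, so epoch 0 is served; the old
//     code demanded epoch >= 1 and wrongly rejected it.
//   - E = L = 65536: the new bound is 65536-65536+1 = 1, so epoch 0 is
//     rejected, since its mix slot has already been overwritten.
// The two tests added below exercise exactly these boundary cases.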
@@ -192,8 +192,14
 		assert.Equal(t, hexutil.Encode(mixOld[:]), resp.Data.Randao)
 	})
 	t.Run("head state below `EpochsPerHistoricalVector`", func(t *testing.T) {
-		s.Stater = &testutil.MockStater{
-			BeaconState: headSt,
+		s := &Server{
+			Stater: &testutil.MockStater{
+				BeaconState: headSt,
+			},
+			HeadFetcher:           chainService,
+			OptimisticModeFetcher: chainService,
+			FinalizationFetcher:   chainService,
+			BeaconDB:              db,
 		}
 
 		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)

@@ -303,6 +309,74
 		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
 		assert.DeepEqual(t, true, resp.Finalized)
 	})
+	t.Run("early epoch scenario - epoch 0 from state at epoch (EpochsPerHistoricalVector - 1)", func(t *testing.T) {
+		// Create a state at an early epoch
+		earlyEpochState, err := util.NewBeaconState()
+		require.NoError(t, err)
+		earlyEpoch := params.BeaconConfig().EpochsPerHistoricalVector - 1
+		require.NoError(t, earlyEpochState.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(earlyEpoch)))
+
+		// Set up the RANDAO mix for epoch 0.
+		// In real networks, this would be the ETH1 block hash used for genesis.
+		epoch0Randao := bytesutil.ToBytes32([]byte("epoch0"))
+		require.NoError(t, earlyEpochState.UpdateRandaoMixesAtIndex(0, epoch0Randao))
+
+		earlyServer := &Server{
+			Stater: &testutil.MockStater{
+				BeaconState: earlyEpochState,
+			},
+			HeadFetcher:           &chainMock.ChainService{},
+			OptimisticModeFetcher: &chainMock.ChainService{},
+			FinalizationFetcher:   &chainMock.ChainService{},
+		}
+
+		// Query epoch 0 from a state at epoch (EpochsPerHistoricalVector - 1) - should succeed.
+		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=0", nil)
+		request.SetPathValue("state_id", "head")
+		writer := httptest.NewRecorder()
+		writer.Body = &bytes.Buffer{}
+
+		earlyServer.GetRandao(writer, request)
+		require.Equal(t, http.StatusOK, writer.Code, "Early epoch queries should succeed when within bounds")
+
+		resp := &structs.GetRandaoResponse{}
+		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
+		assert.Equal(t, hexutil.Encode(epoch0Randao[:]), resp.Data.Randao)
+	})
+	t.Run("early epoch scenario - epoch 0 from state at epoch EpochsPerHistoricalVector", func(t *testing.T) {
+		// Create a state at an early epoch
+		earlyEpochState, err := util.NewBeaconState()
+		require.NoError(t, err)
+		earlyEpoch := params.BeaconConfig().EpochsPerHistoricalVector
+		require.NoError(t, earlyEpochState.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(earlyEpoch)))
+
+		// Set up the RANDAO mix for epoch 0.
+		// In real networks, this would be the ETH1 block hash used for genesis.
+		epoch0Randao := bytesutil.ToBytes32([]byte("epoch0"))
+		require.NoError(t, earlyEpochState.UpdateRandaoMixesAtIndex(0, epoch0Randao))
+
+		earlyServer := &Server{
+			Stater: &testutil.MockStater{
+				BeaconState: earlyEpochState,
+			},
+			HeadFetcher:           &chainMock.ChainService{},
+			OptimisticModeFetcher: &chainMock.ChainService{},
+			FinalizationFetcher:   &chainMock.ChainService{},
+		}
+
+		// Query epoch 0 from a state at epoch EpochsPerHistoricalVector - should fail.
+		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=0", nil)
+		request.SetPathValue("state_id", "head")
+		writer := httptest.NewRecorder()
+		writer.Body = &bytes.Buffer{}
+
+		earlyServer.GetRandao(writer, request)
+		require.Equal(t, http.StatusBadRequest, writer.Code)
+		e := &httputil.DefaultJsonError{}
+		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
+		assert.Equal(t, http.StatusBadRequest, e.Code)
+		require.StringContains(t, "Epoch is out of range for the randao mixes of the state", e.Message)
+	})
 }
 
 func Test_currentCommitteeIndicesFromState(t *testing.T) {
@@ -13,7 +13,9 @@ go_library(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/options:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -33,6 +35,7 @@ go_test(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -41,6 +44,7 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -10,7 +10,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
field_params "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -22,6 +24,8 @@ import (
|
||||
)
|
||||
|
||||
// Blobs is an HTTP handler for Beacon API getBlobs.
// Deprecated: /eth/v1/beacon/blob_sidecars/{block_id} is deprecated in favor of /eth/v1/beacon/blobs/{block_id}.
// The endpoint will continue to work for some time after Fulu, however.
func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
    ctx, span := trace.StartSpan(r.Context(), "beacon.Blobs")
    defer span.End()
@@ -34,7 +38,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
    segments := strings.Split(r.URL.Path, "/")
    blockId := segments[len(segments)-1]

    verifiedBlobs, rpcErr := s.Blocker.Blobs(ctx, blockId, indices)
    verifiedBlobs, rpcErr := s.Blocker.Blobs(ctx, blockId, options.WithIndices(indices))
    if rpcErr != nil {
        code := core.ErrorReasonToHTTP(rpcErr.Reason)
        switch code {
@@ -54,12 +58,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
    }

    blk, err := s.Blocker.Block(ctx, []byte(blockId))
    if err != nil {
        httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
        return
    }
    if blk == nil {
        httputil.HandleError(w, "Block not found", http.StatusNotFound)
    if !shared.WriteBlockFetchError(w, blk, err) {
        return
    }

@@ -127,6 +126,89 @@ loop:
    return indices, nil
}
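
The hunk above swaps a raw `indices` argument for `options.WithIndices(indices)`, i.e. a functional-options style request to the Blocker. As a hedged sketch only: the names `BlobRequest` and `BlobOption` below are assumptions for illustration, since only `WithIndices` and `WithVersionedHashes` actually appear in this diff, and the real code in beacon-chain/rpc/options may differ.

    // Hypothetical shape of the options package used above.
    package options

    // BlobRequest collects optional filters for a blob lookup (assumed name).
    type BlobRequest struct {
        Indices         []uint64
        VersionedHashes [][]byte
    }

    // BlobOption mutates a BlobRequest (assumed name).
    type BlobOption func(*BlobRequest)

    // WithIndices restricts the response to the given blob indices.
    func WithIndices(indices []uint64) BlobOption {
        return func(r *BlobRequest) { r.Indices = indices }
    }

    // WithVersionedHashes restricts the response to blobs whose KZG
    // commitments map to the given versioned hashes.
    func WithVersionedHashes(hashes [][]byte) BlobOption {
        return func(r *BlobRequest) { r.VersionedHashes = hashes }
    }

Variadic options like these let the Blocker interface grow new filters without breaking existing call sites, which matches how the same `Blobs` call is invoked with different options in the two handlers in this diff.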

// GetBlobs retrieves blobs for a given block id. (This is the new handler that replaces func (s *Server) Blobs.)
func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) {
    ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlobs")
    defer span.End()

    segments := strings.Split(r.URL.Path, "/")
    blockId := segments[len(segments)-1]

    var verifiedBlobs []*blocks.VerifiedROBlob
    var rpcErr *core.RpcError

    // Check if the versioned_hashes parameter is provided
    versionedHashesStr := r.URL.Query()["versioned_hashes"]
    versionedHashes := make([][]byte, len(versionedHashesStr))
    if len(versionedHashesStr) > 0 {
        for i, hashStr := range versionedHashesStr {
            hash, ok := shared.ValidateHex(w, fmt.Sprintf("versioned_hashes[%d]", i), hashStr, 32)
            if !ok {
                return
            }
            versionedHashes[i] = hash
        }
    }
    verifiedBlobs, rpcErr = s.Blocker.Blobs(ctx, blockId, options.WithVersionedHashes(versionedHashes))
    if rpcErr != nil {
        code := core.ErrorReasonToHTTP(rpcErr.Reason)
        switch code {
        case http.StatusBadRequest:
            httputil.HandleError(w, "Bad Request: "+rpcErr.Err.Error(), code)
            return
        case http.StatusNotFound:
            httputil.HandleError(w, "Not found: "+rpcErr.Err.Error(), code)
            return
        case http.StatusInternalServerError:
            httputil.HandleError(w, "Internal server error: "+rpcErr.Err.Error(), code)
            return
        default:
            httputil.HandleError(w, rpcErr.Err.Error(), code)
            return
        }
    }

    blk, err := s.Blocker.Block(ctx, []byte(blockId))
    if !shared.WriteBlockFetchError(w, blk, err) {
        return
    }

    if httputil.RespondWithSsz(r) {
        sszLen := fieldparams.BlobSize
        sszData := make([]byte, len(verifiedBlobs)*sszLen)
        for i := range verifiedBlobs {
            copy(sszData[i*sszLen:(i+1)*sszLen], verifiedBlobs[i].Blob)
        }

        w.Header().Set(api.VersionHeader, version.String(blk.Version()))
        httputil.WriteSsz(w, sszData)
        return
    }

    blkRoot, err := blk.Block().HashTreeRoot()
    if err != nil {
        httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
        return
    }
    isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
    if err != nil {
        httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
        return
    }

    data := make([]string, len(verifiedBlobs))
    for i, v := range verifiedBlobs {
        data[i] = hexutil.Encode(v.Blob)
    }
    resp := &structs.GetBlobsResponse{
        Data:                data,
        ExecutionOptimistic: isOptimistic,
        Finalized:           s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
    }
    w.Header().Set(api.VersionHeader, version.String(blk.Version()))
    httputil.WriteJson(w, resp)
}
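
As a usage sketch (not part of this change), a client of the new endpoint might look like the following. The node address is a placeholder, and the response struct simply mirrors the fields written by the handler above, with JSON names assumed to follow the usual Beacon API snake_case convention:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // getBlobsResponse mirrors the JSON written by GetBlobs above
    // (field names are assumptions based on Beacon API conventions).
    type getBlobsResponse struct {
        ExecutionOptimistic bool     `json:"execution_optimistic"`
        Finalized           bool     `json:"finalized"`
        Data                []string `json:"data"` // hex-encoded blobs
    }

    func main() {
        // Placeholder node address and block ID; adjust for a real setup.
        resp, err := http.Get("http://localhost:3500/eth/v1/beacon/blobs/head")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out getBlobsResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("blobs=%d optimistic=%v finalized=%v\n",
            len(out.Data), out.ExecutionOptimistic, out.Finalized)
    }

Requesting specific blobs would add repeated `versioned_hashes` query parameters, as exercised by the tests later in this diff.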

func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) []*structs.Sidecar {
    sidecars := make([]*structs.Sidecar, len(verifiedBlobs))
    for i, sc := range verifiedBlobs {
@@ -147,13 +229,13 @@ func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) []*struct
    }

func buildSidecarsSSZResponse(verifiedBlobs []*blocks.VerifiedROBlob) ([]byte, error) {
    ssz := make([]byte, field_params.BlobSidecarSize*len(verifiedBlobs))
    ssz := make([]byte, fieldparams.BlobSidecarSize*len(verifiedBlobs))
    for i, sidecar := range verifiedBlobs {
        sszrep, err := sidecar.MarshalSSZ()
        if err != nil {
            return nil, errors.Wrap(err, "failed to marshal sidecar ssz")
        }
        copy(ssz[i*field_params.BlobSidecarSize:(i+1)*field_params.BlobSidecarSize], sszrep)
        copy(ssz[i*fieldparams.BlobSidecarSize:(i+1)*fieldparams.BlobSidecarSize], sszrep)
    }
    return ssz, nil
}

@@ -14,6 +14,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/api"
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
@@ -22,6 +23,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -69,7 +71,7 @@ func TestBlobs(t *testing.T) {
        e := &httputil.DefaultJsonError{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
        assert.Equal(t, http.StatusBadRequest, e.Code)
        assert.StringContains(t, "blobs are not supported for Phase 0 fork", e.Message)
        assert.StringContains(t, "not supported for Phase 0 fork", e.Message)
    })
    t.Run("head", func(t *testing.T) {
        u := "http://foo.example/head"
@@ -278,7 +280,7 @@ func TestBlobs(t *testing.T) {
        assert.Equal(t, http.StatusBadRequest, e.Code)
        assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
    })
    t.Run("outside retention period returns 200 w/ empty list ", func(t *testing.T) {
    t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
        u := "http://foo.example/123"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
@@ -296,7 +298,7 @@ func TestBlobs(t *testing.T) {
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.SidecarsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 0, len(resp.Data))
        require.Equal(t, 4, len(resp.Data))

        require.Equal(t, "deneb", resp.Version)
        require.Equal(t, false, resp.ExecutionOptimistic)
@@ -334,11 +336,23 @@ func TestBlobs(t *testing.T) {
        require.Equal(t, false, resp.Finalized)
    })
    t.Run("slot before Deneb fork", func(t *testing.T) {
        // Create and save a pre-Deneb block at slot 31
        predenebBlock := util.NewBeaconBlock()
        predenebBlock.Block.Slot = 31
        util.SaveBlock(t, t.Context(), db, predenebBlock)

        u := "http://foo.example/31"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{},
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BeaconDB:    db,
            BlobStorage: bs,
        }

        s.Blobs(writer, request)

@@ -346,7 +360,7 @@ func TestBlobs(t *testing.T) {
        e := &httputil.DefaultJsonError{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
        assert.Equal(t, http.StatusBadRequest, e.Code)
        assert.StringContains(t, "blobs are not supported before Deneb fork", e.Message)
        assert.StringContains(t, "not supported before", e.Message)
    })
    t.Run("malformed block ID", func(t *testing.T) {
        u := "http://foo.example/foo"
@@ -558,7 +572,584 @@ func Test_parseIndices(t *testing.T) {
    }
}

func TestGetBlobs(t *testing.T) {
    // Start the trusted setup for KZG operations (needed for data columns)
    require.NoError(t, kzg.Start())

    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.DenebForkEpoch = 1
    cfg.ElectraForkEpoch = 10
    cfg.FuluForkEpoch = 20
    cfg.BlobSchedule = []params.BlobScheduleEntry{
        {Epoch: 0, MaxBlobsPerBlock: 0},
        {Epoch: 1, MaxBlobsPerBlock: 6},   // Deneb
        {Epoch: 10, MaxBlobsPerBlock: 9},  // Electra
        {Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
    }
    params.OverrideBeaconConfig(cfg)

    db := testDB.SetupDB(t)
    denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
    require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
    bs := filesystem.NewEphemeralBlobStorage(t)
    testSidecars := verification.FakeVerifySliceForTest(t, blobs)
    for i := range testSidecars {
        require.NoError(t, bs.Save(testSidecars[i]))
    }
    blockRoot := blobs[0].BlockRoot()

    mockChainService := &mockChain.ChainService{
        FinalizedRoots: map[[32]byte]bool{},
        Genesis:        time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second),
    }
    s := &Server{
        OptimisticModeFetcher: mockChainService,
        FinalizationFetcher:   mockChainService,
        TimeFetcher:           mockChainService,
    }

    t.Run("genesis", func(t *testing.T) {
        u := "http://foo.example/genesis"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{}
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusBadRequest, writer.Code)
        e := &httputil.DefaultJsonError{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
        assert.Equal(t, http.StatusBadRequest, e.Code)
        assert.StringContains(t, "not supported for Phase 0 fork", e.Message)
    })
    t.Run("head", func(t *testing.T) {
        u := "http://foo.example/head"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:], Block: denebBlock},
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BeaconDB:    db,
            BlobStorage: bs,
        }
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetBlobsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 4, len(resp.Data))
        blob := resp.Data[0]
        require.NotNil(t, blob)
        assert.Equal(t, hexutil.Encode(blobs[0].Blob), blob)
        blob = resp.Data[1]
        require.NotNil(t, blob)
        assert.Equal(t, hexutil.Encode(blobs[1].Blob), blob)
        blob = resp.Data[2]
        require.NotNil(t, blob)
        assert.Equal(t, hexutil.Encode(blobs[2].Blob), blob)
        blob = resp.Data[3]
        require.NotNil(t, blob)
        assert.Equal(t, hexutil.Encode(blobs[3].Blob), blob)
        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)
    })
    t.Run("finalized", func(t *testing.T) {
        u := "http://foo.example/finalized"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BeaconDB:    db,
            BlobStorage: bs,
        }
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetBlobsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 4, len(resp.Data))

        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)
    })
    t.Run("root", func(t *testing.T) {
        u := "http://foo.example/" + hexutil.Encode(blockRoot[:])
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
            BeaconDB:         db,
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BlobStorage: bs,
        }
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetBlobsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 4, len(resp.Data))

        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)
    })
    t.Run("slot", func(t *testing.T) {
        u := "http://foo.example/123"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
            BeaconDB:         db,
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BlobStorage: bs,
        }
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetBlobsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 4, len(resp.Data))

        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)
    })
    t.Run("slot not found", func(t *testing.T) {
        u := "http://foo.example/122"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
            BeaconDB:         db,
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BlobStorage: bs,
        }
        s.GetBlobs(writer, request)

        assert.Equal(t, http.StatusNotFound, writer.Code)
    })
    t.Run("no blobs returns an empty array", func(t *testing.T) {
        u := "http://foo.example/123"
        request := httptest.NewRequest("GET", u, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.Blocker = &lookup.BeaconDbBlocker{
            ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
            GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
                Genesis: time.Now(),
            },
            BeaconDB:    db,
            BlobStorage: filesystem.NewEphemeralBlobStorage(t), // new ephemeral storage
        }

        s.GetBlobs(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetBlobsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, len(resp.Data), 0)

        require.Equal(t, false, resp.ExecutionOptimistic)
        require.Equal(t, false, resp.Finalized)
    })
t.Run("outside retention period still returns 200 what we have in db ", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
moc := &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: moc,
|
||||
GenesisTimeFetcher: moc, // genesis time is set to 0 here, so it results in current epoch being extremely large
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
|
||||
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commitments), 0)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
|
||||
u := "http://foo.example/333"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot before Deneb fork", func(t *testing.T) {
|
||||
// Create and save a pre-Deneb block at slot 31
|
||||
predenebBlock := util.NewBeaconBlock()
|
||||
predenebBlock.Block.Slot = 31
|
||||
util.SaveBlock(t, t.Context(), db, predenebBlock)
|
||||
|
||||
u := "http://foo.example/31"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
ChainInfoFetcher: &mockChain.ChainService{},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "not supported before", e.Message)
|
||||
})
|
||||
t.Run("malformed block ID", func(t *testing.T) {
|
||||
u := "http://foo.example/foo"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "could not parse block ID"))
|
||||
})
|
||||
t.Run("ssz", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
assert.Equal(t, version.String(version.Deneb), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
require.Equal(t, fieldparams.BlobSize*4, len(writer.Body.Bytes())) // size of 4 sidecars
|
||||
// unmarshal all 4 blobs
|
||||
blbs := unmarshalBlobs(t, writer.Body.Bytes())
|
||||
require.Equal(t, 4, len(blbs))
|
||||
})
|
||||
t.Run("ssz multiple blobs", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
blbs := unmarshalBlobs(t, writer.Body.Bytes())
|
||||
require.Equal(t, 4, len(blbs))
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes invalid hex", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized?versioned_hashes=invalidhex,invalid2hex"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "versioned_hashes[0] is invalid", e.Message)
|
||||
assert.StringContains(t, "hex string without 0x prefix", e.Message)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes invalid length", func(t *testing.T) {
|
||||
// Using 16 bytes instead of 32
|
||||
shortHash := "0x1234567890abcdef1234567890abcdef"
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s", shortHash)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid versioned_hashes[0]:", e.Message)
|
||||
assert.StringContains(t, "is not length 32", e.Message)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes valid single hash", func(t *testing.T) {
|
||||
// Get the first blob's commitment and convert to versioned hash
|
||||
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(blobs[0].KzgCommitment)
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s", hexutil.Encode(versionedHash[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 1, len(resp.Data)) // Should return only the requested blob
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].Blob), blob)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes multiple hashes", func(t *testing.T) {
|
||||
// Get commitments for blobs 1 and 3 and convert to versioned hashes
|
||||
versionedHash1 := primitives.ConvertKzgCommitmentToVersionedHash(blobs[1].KzgCommitment)
|
||||
versionedHash3 := primitives.ConvertKzgCommitmentToVersionedHash(blobs[3].KzgCommitment)
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s&versioned_hashes=%s",
|
||||
hexutil.Encode(versionedHash1[:]), hexutil.Encode(versionedHash3[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data)) // Should return 2 requested blobs
|
||||
// Verify blobs are returned in KZG commitment order from the block (1, 3)
|
||||
// not in the requested order
|
||||
assert.Equal(t, hexutil.Encode(blobs[1].Blob), resp.Data[0])
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].Blob), resp.Data[1])
|
||||
})
|
||||
|
||||
// Test for Electra fork
|
||||
t.Run("electra max blobs", func(t *testing.T) {
|
||||
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, maxBlobsPerBlockByVersion(version.Electra))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
|
||||
electraBs := filesystem.NewEphemeralBlobStorage(t)
|
||||
electraSidecars := verification.FakeVerifySliceForTest(t, electraBlobs)
|
||||
for i := range electraSidecars {
|
||||
require.NoError(t, electraBs.Save(electraSidecars[i]))
|
||||
}
|
||||
electraBlockRoot := electraBlobs[0].BlockRoot()
|
||||
|
||||
u := "http://foo.example/323"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: electraBlockRoot[:]}, Block: electraBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: electraBs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(electraBlobs[0].Blob), blob)
|
||||
})
|
||||
|
||||
// Test for Fulu fork with data columns
|
||||
t.Run("fulu with data columns", func(t *testing.T) {
|
||||
// Generate a Fulu block with data columns
|
||||
fuluForkSlot := primitives.Slot(20 * params.BeaconConfig().SlotsPerEpoch) // Fulu is at epoch 20
|
||||
fuluBlock, _, verifiedRoDataColumnSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithSlot(fuluForkSlot))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), fuluBlock.ReadOnlySignedBeaconBlock))
|
||||
fuluBlockRoot := fuluBlock.Root()
|
||||
|
||||
// Store data columns
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
require.NoError(t, dataColumnStorage.Save(verifiedRoDataColumnSidecars))
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/%d", fuluForkSlot)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
// Create an empty blob storage (won't be used but needs to be non-nil)
|
||||
_, emptyBlobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: fuluBlockRoot[:]}, Block: fuluBlock.ReadOnlySignedBeaconBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: emptyBlobStorage,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
|
||||
// Verify that we got blobs back (they were reconstructed from data columns)
|
||||
for i := range resp.Data {
|
||||
require.NotNil(t, resp.Data[i])
|
||||
}
|
||||
})
|
||||
|
||||
// Test for Fulu with versioned hashes and data columns
|
||||
t.Run("fulu versioned_hashes with data columns", func(t *testing.T) {
|
||||
// Generate a Fulu block with data columns
|
||||
fuluForkSlot2 := primitives.Slot(20*params.BeaconConfig().SlotsPerEpoch + 10) // Fulu is at epoch 20
|
||||
fuluBlock2, _, verifiedRoDataColumnSidecars2 := util.GenerateTestFuluBlockWithSidecars(t, 4, util.WithSlot(fuluForkSlot2))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), fuluBlock2.ReadOnlySignedBeaconBlock))
|
||||
fuluBlockRoot2 := fuluBlock2.Root()
|
||||
|
||||
// Store data columns
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
require.NoError(t, dataColumnStorage.Save(verifiedRoDataColumnSidecars2))
|
||||
|
||||
// Get the commitments from the block to derive versioned hashes
|
||||
commitments, err := fuluBlock2.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, len(commitments) >= 3)
|
||||
|
||||
// Request specific blobs by versioned hashes in reverse order
|
||||
// We request commitments[2] and commitments[0], but they should be returned
|
||||
// in commitment order from the block (0, 2), not in the requested order
|
||||
versionedHash1 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[2])
|
||||
versionedHash2 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[0])
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/%d?versioned_hashes=%s&versioned_hashes=%s", fuluForkSlot2,
|
||||
hexutil.Encode(versionedHash1[:]),
|
||||
hexutil.Encode(versionedHash2[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
// Create an empty blob storage (won't be used but needs to be non-nil)
|
||||
_, emptyBlobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: fuluBlockRoot2[:]}, Block: fuluBlock2.ReadOnlySignedBeaconBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: emptyBlobStorage,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
// Blobs are returned in commitment order, regardless of request order
|
||||
})
|
||||
}

// unmarshalBlobs splits a concatenated SSZ response body into its
// fixed-size (fieldparams.BlobSize) blobs.
func unmarshalBlobs(t *testing.T, response []byte) [][]byte {
    require.Equal(t, 0, len(response)%fieldparams.BlobSize)
    if len(response) == fieldparams.BlobSize {
        return [][]byte{response}
    }
    blobs := make([][]byte, len(response)/fieldparams.BlobSize)
    for i := range blobs {
        blobs[i] = response[i*fieldparams.BlobSize : (i+1)*fieldparams.BlobSize]
    }
    return blobs
}

func maxBlobsPerBlockByVersion(v int) int {
    if v >= version.Fulu {
        return params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu
    }
    if v >= version.Electra {
        return params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra
    }

@@ -13,13 +13,19 @@ go_library(
        "//api/server/structs:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/rpc/eth/helpers:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/rpc/lookup:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/httputil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -34,9 +40,13 @@ go_test(
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/rpc/testutil:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",

@@ -4,19 +4,31 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "math"
    "net/http"
    "net/url"
    "strconv"
    "strings"

    "github.com/OffchainLabs/prysm/v6/api"
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
)

const errMsgStateFromConsensus = "Could not convert consensus state to response"
const (
    errMsgStateFromConsensus = "Could not convert consensus state to response"
)

// GetBeaconStateV2 returns the full beacon state for a given state ID.
func (s *Server) GetBeaconStateV2(w http.ResponseWriter, r *http.Request) {
@@ -208,3 +220,176 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
    }
    httputil.WriteJson(w, resp)
}

// DataColumnSidecars retrieves data column sidecars for a given block id.
func (s *Server) DataColumnSidecars(w http.ResponseWriter, r *http.Request) {
    ctx, span := trace.StartSpan(r.Context(), "debug.DataColumnSidecars")
    defer span.End()

    // Check if we're before Fulu fork - data columns are only available from Fulu onwards
    fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
    if fuluForkEpoch == math.MaxUint64 {
        httputil.HandleError(w, "Data columns are not supported - Fulu fork not configured", http.StatusBadRequest)
        return
    }

    // Check if we're before Fulu fork based on current slot
    currentSlot := s.GenesisTimeFetcher.CurrentSlot()
    currentEpoch := primitives.Epoch(currentSlot / params.BeaconConfig().SlotsPerEpoch)
    if currentEpoch < fuluForkEpoch {
        httputil.HandleError(w, "Data columns are not supported - before Fulu fork", http.StatusBadRequest)
        return
    }

    indices, err := parseDataColumnIndices(r.URL)
    if err != nil {
        httputil.HandleError(w, err.Error(), http.StatusBadRequest)
        return
    }
    segments := strings.Split(r.URL.Path, "/")
    blockId := segments[len(segments)-1]

    verifiedDataColumns, rpcErr := s.Blocker.DataColumns(ctx, blockId, indices)
    if rpcErr != nil {
        code := core.ErrorReasonToHTTP(rpcErr.Reason)
        switch code {
        case http.StatusBadRequest:
            httputil.HandleError(w, "Bad request: "+rpcErr.Err.Error(), code)
            return
        case http.StatusNotFound:
            httputil.HandleError(w, "Not found: "+rpcErr.Err.Error(), code)
            return
        case http.StatusInternalServerError:
            httputil.HandleError(w, "Internal server error: "+rpcErr.Err.Error(), code)
            return
        default:
            httputil.HandleError(w, rpcErr.Err.Error(), code)
            return
        }
    }

    blk, err := s.Blocker.Block(ctx, []byte(blockId))
    if !shared.WriteBlockFetchError(w, blk, err) {
        return
    }

    if httputil.RespondWithSsz(r) {
        sszResp, err := buildDataColumnSidecarsSSZResponse(verifiedDataColumns)
        if err != nil {
            httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
            return
        }
        w.Header().Set(api.VersionHeader, version.String(blk.Version()))
        httputil.WriteSsz(w, sszResp)
        return
    }

    blkRoot, err := blk.Block().HashTreeRoot()
    if err != nil {
        httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
        return
    }
    isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
    if err != nil {
        httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
        return
    }

    data := buildDataColumnSidecarsJsonResponse(verifiedDataColumns)
    resp := &structs.GetDebugDataColumnSidecarsResponse{
        Version:             version.String(blk.Version()),
        Data:                data,
        ExecutionOptimistic: isOptimistic,
        Finalized:           s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
    }
    w.Header().Set(api.VersionHeader, version.String(blk.Version()))
    httputil.WriteJson(w, resp)
}
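
For intuition on the fork gate above: the handler derives the current epoch by integer-dividing the current slot by the slots-per-epoch parameter and rejects anything earlier than the Fulu epoch. A self-contained sketch of the same arithmetic, using assumed example values (the real ones come from params.BeaconConfig()):

    package main

    import "fmt"

    func main() {
        // Assumed example values: mainnet-style 32 slots per epoch and a
        // hypothetical Fulu fork at epoch 100, i.e. slot 3200.
        const slotsPerEpoch = 32
        const fuluForkEpoch = 100

        currentSlot := uint64(3199)
        currentEpoch := currentSlot / slotsPerEpoch // 99 via integer division
        fmt.Println(currentEpoch < fuluForkEpoch)   // true: still before the fork, request rejected
    }

With currentSlot = 3200 the comparison flips and the request proceeds to index parsing.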

// parseDataColumnIndices filters out invalid and duplicate data column indices
func parseDataColumnIndices(url *url.URL) ([]int, error) {
    numberOfColumns := params.BeaconConfig().NumberOfColumns
    rawIndices := url.Query()["indices"]
    indices := make([]int, 0, numberOfColumns)
    invalidIndices := make([]string, 0)
loop:
    for _, raw := range rawIndices {
        ix, err := strconv.Atoi(raw)
        if err != nil {
            invalidIndices = append(invalidIndices, raw)
            continue
        }
        if !(0 <= ix && uint64(ix) < numberOfColumns) {
            invalidIndices = append(invalidIndices, raw)
            continue
        }
        for i := range indices {
            if ix == indices[i] {
                continue loop
            }
        }
        indices = append(indices, ix)
    }

    if len(invalidIndices) > 0 {
        return nil, fmt.Errorf("requested data column indices %v are invalid", invalidIndices)
    }
    return indices, nil
}
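
A quick illustration of the parsing rules above: duplicates are dropped (first occurrence wins, order preserved), while any single invalid entry fails the whole request. This is an in-package sketch, not part of the diff, and it assumes the configured NumberOfColumns is at least 8:

    func ExampleParseDataColumnIndices() {
        // Duplicate "3" is silently deduplicated.
        u, _ := url.Parse("http://node.example/x?indices=3&indices=3&indices=7")
        indices, err := parseDataColumnIndices(u) // indices == []int{3, 7}, err == nil

        // One malformed value poisons the entire request.
        u2, _ := url.Parse("http://node.example/x?indices=3&indices=oops")
        _, err2 := parseDataColumnIndices(u2) // err2 != nil, naming "oops" as invalid

        _, _, _ = indices, err, err2
    }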

func buildDataColumnSidecarsJsonResponse(verifiedDataColumns []blocks.VerifiedRODataColumn) []*structs.DataColumnSidecar {
    sidecars := make([]*structs.DataColumnSidecar, len(verifiedDataColumns))
    for i, dc := range verifiedDataColumns {
        column := make([]string, len(dc.Column))
        for j, cell := range dc.Column {
            column[j] = hexutil.Encode(cell)
        }

        kzgCommitments := make([]string, len(dc.KzgCommitments))
        for j, commitment := range dc.KzgCommitments {
            kzgCommitments[j] = hexutil.Encode(commitment)
        }

        kzgProofs := make([]string, len(dc.KzgProofs))
        for j, proof := range dc.KzgProofs {
            kzgProofs[j] = hexutil.Encode(proof)
        }

        kzgCommitmentsInclusionProof := make([]string, len(dc.KzgCommitmentsInclusionProof))
        for j, proof := range dc.KzgCommitmentsInclusionProof {
            kzgCommitmentsInclusionProof[j] = hexutil.Encode(proof)
        }

        sidecars[i] = &structs.DataColumnSidecar{
            Index:                        strconv.FormatUint(dc.Index, 10),
            Column:                       column,
            KzgCommitments:               kzgCommitments,
            KzgProofs:                    kzgProofs,
            SignedBeaconBlockHeader:      structs.SignedBeaconBlockHeaderFromConsensus(dc.SignedBlockHeader),
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }
    }
    return sidecars
}
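
Every byte field above is rendered as a 0x-prefixed hex string via hexutil.Encode from go-ethereum. For instance (illustrative values only):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
        // hexutil.Encode prefixes with "0x" and emits lowercase hex digits,
        // matching the encoding used for cells, commitments, and proofs above.
        cell := []byte{0xde, 0xad, 0xbe, 0xef}
        fmt.Println(hexutil.Encode(cell)) // "0xdeadbeef"
    }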

// buildDataColumnSidecarsSSZResponse builds SSZ response for data column sidecars
func buildDataColumnSidecarsSSZResponse(verifiedDataColumns []blocks.VerifiedRODataColumn) ([]byte, error) {
    if len(verifiedDataColumns) == 0 {
        return []byte{}, nil
    }

    // Pre-allocate buffer for all sidecars using the known SSZ size
    sizePerSidecar := (&ethpb.DataColumnSidecar{}).SizeSSZ()
    ssz := make([]byte, 0, sizePerSidecar*len(verifiedDataColumns))

    // Marshal and append each sidecar
    for i, sidecar := range verifiedDataColumns {
        sszrep, err := sidecar.MarshalSSZ()
        if err != nil {
            return nil, errors.Wrapf(err, "failed to marshal data column sidecar at index %d", i)
        }
        ssz = append(ssz, sszrep...)
    }

    return ssz, nil
}
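
Note that, unlike the blob SSZ path earlier in this diff (which copies into fixed BlobSize slots), this function sizes the buffer by the empty sidecar's SizeSSZ() as a capacity hint only and then appends, so the result stays correct even if individual encodings differ in length. A minimal, self-contained sketch of the same append-into-preallocated-capacity pattern (toy data, not the real sidecar types):

    package main

    import "fmt"

    func main() {
        items := [][]byte{{1, 2}, {3, 4, 5}} // stand-ins for per-sidecar SSZ bytes
        const estimatedSize = 4              // capacity hint, not a hard per-item size

        out := make([]byte, 0, estimatedSize*len(items))
        for _, enc := range items {
            // append tolerates variable-length encodings; the capacity hint
            // merely reduces re-allocations in the common case.
            out = append(out, enc...)
        }
        fmt.Println(len(out)) // 5
    }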

@@ -2,9 +2,13 @@ package debug

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "math"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "github.com/OffchainLabs/prysm/v6/api"
@@ -13,9 +17,13 @@ import (
    dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
    forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
@@ -515,3 +523,285 @@ func TestGetForkChoice(t *testing.T) {
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
    require.Equal(t, "2", resp.FinalizedCheckpoint.Epoch)
}

func TestDataColumnSidecars(t *testing.T) {
    t.Run("Fulu fork not configured", func(t *testing.T) {
        // Save the original config
        originalConfig := params.BeaconConfig()
        defer func() { params.OverrideBeaconConfig(originalConfig) }()

        // Set Fulu fork epoch to MaxUint64 (unconfigured)
        config := params.BeaconConfig().Copy()
        config.FuluForkEpoch = math.MaxUint64
        params.OverrideBeaconConfig(config)

        chainService := &blockchainmock.ChainService{}

        // Create a mock blocker to avoid nil pointer
        mockBlocker := &testutil.MockBlocker{}

        s := &Server{
            GenesisTimeFetcher: chainService,
            Blocker:            mockBlocker,
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.DataColumnSidecars(writer, request)
        require.Equal(t, http.StatusBadRequest, writer.Code)
        assert.StringContains(t, "Data columns are not supported - Fulu fork not configured", writer.Body.String())
    })

    t.Run("Before Fulu fork", func(t *testing.T) {
        // Save the original config
        originalConfig := params.BeaconConfig()
        defer func() { params.OverrideBeaconConfig(originalConfig) }()

        // Set Fulu fork epoch to 100
        config := params.BeaconConfig().Copy()
        config.FuluForkEpoch = 100
        params.OverrideBeaconConfig(config)

        chainService := &blockchainmock.ChainService{}
        currentSlot := primitives.Slot(0) // Current slot 0 (epoch 0, before epoch 100)
        chainService.Slot = &currentSlot

        // Create a mock blocker to avoid nil pointer
        mockBlocker := &testutil.MockBlocker{
            DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
                return nil, &core.RpcError{Err: errors.New("before Fulu fork"), Reason: core.BadRequest}
            },
        }

        s := &Server{
            GenesisTimeFetcher: chainService,
            Blocker:            mockBlocker,
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.DataColumnSidecars(writer, request)
        require.Equal(t, http.StatusBadRequest, writer.Code)
        assert.StringContains(t, "Data columns are not supported - before Fulu fork", writer.Body.String())
    })

    t.Run("Invalid indices", func(t *testing.T) {
        // Save the original config
        originalConfig := params.BeaconConfig()
        defer func() { params.OverrideBeaconConfig(originalConfig) }()

        // Set Fulu fork epoch to 0 (already activated)
        config := params.BeaconConfig().Copy()
        config.FuluForkEpoch = 0
        params.OverrideBeaconConfig(config)

        chainService := &blockchainmock.ChainService{}
        currentSlot := primitives.Slot(0) // Current slot 0 (epoch 0, at fork)
        chainService.Slot = &currentSlot

        // Create a mock blocker to avoid nil pointer
        mockBlocker := &testutil.MockBlocker{
            DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
                return nil, &core.RpcError{Err: errors.New("invalid index"), Reason: core.BadRequest}
            },
        }

        s := &Server{
            GenesisTimeFetcher: chainService,
            Blocker:            mockBlocker,
        }

        // Test with invalid index (out of range)
        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head?indices=9999", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.DataColumnSidecars(writer, request)
        require.Equal(t, http.StatusBadRequest, writer.Code)
        assert.StringContains(t, "requested data column indices [9999] are invalid", writer.Body.String())
    })

    t.Run("Block not found", func(t *testing.T) {
        // Save the original config
        originalConfig := params.BeaconConfig()
        defer func() { params.OverrideBeaconConfig(originalConfig) }()

        // Set Fulu fork epoch to 0 (already activated)
        config := params.BeaconConfig().Copy()
        config.FuluForkEpoch = 0
        params.OverrideBeaconConfig(config)

        chainService := &blockchainmock.ChainService{}
        currentSlot := primitives.Slot(0) // Current slot 0
        chainService.Slot = &currentSlot

        // Create a mock blocker that returns block not found
        mockBlocker := &testutil.MockBlocker{
            DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
                return nil, &core.RpcError{Err: errors.New("block not found"), Reason: core.NotFound}
            },
            BlockToReturn: nil, // Block not found
        }

        s := &Server{
            GenesisTimeFetcher: chainService,
            Blocker:            mockBlocker,
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.DataColumnSidecars(writer, request)
        require.Equal(t, http.StatusNotFound, writer.Code)
    })

    t.Run("Empty data columns", func(t *testing.T) {
        // Save the original config
        originalConfig := params.BeaconConfig()
        defer func() { params.OverrideBeaconConfig(originalConfig) }()

        // Set Fulu fork epoch to 0
        config := params.BeaconConfig().Copy()
        config.FuluForkEpoch = 0
        params.OverrideBeaconConfig(config)

        // Create a simple test block
        signedTestBlock := util.NewBeaconBlock()
        roBlock, err := blocks.NewSignedBeaconBlock(signedTestBlock)
        require.NoError(t, err)

        chainService := &blockchainmock.ChainService{}
        currentSlot := primitives.Slot(0) // Current slot 0
        chainService.Slot = &currentSlot
        chainService.OptimisticRoots = make(map[[32]byte]bool)
        chainService.FinalizedRoots = make(map[[32]byte]bool)

        mockBlocker := &testutil.MockBlocker{
            DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
                return []blocks.VerifiedRODataColumn{}, nil // Empty data columns
            },
            BlockToReturn: roBlock,
        }

        s := &Server{
            GenesisTimeFetcher:    chainService,
            OptimisticModeFetcher: chainService,
            FinalizationFetcher:   chainService,
            Blocker:               mockBlocker,
        }

        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.DataColumnSidecars(writer, request)
        require.Equal(t, http.StatusOK, writer.Code)

        resp := &structs.GetDebugDataColumnSidecarsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.Equal(t, 0, len(resp.Data))
    })
}

func TestParseDataColumnIndices(t *testing.T) {
    // Save the original config
    originalConfig := params.BeaconConfig()
    defer func() { params.OverrideBeaconConfig(originalConfig) }()

    // Set NumberOfColumns to 128 for testing
    config := params.BeaconConfig().Copy()
    config.NumberOfColumns = 128
    params.OverrideBeaconConfig(config)

    tests := []struct {
        name        string
        queryParams map[string][]string
        expected    []int
        expectError bool
    }{
        {
            name:        "no indices",
            queryParams: map[string][]string{},
            expected:    []int{},
            expectError: false,
        },
        {
            name:        "valid indices",
            queryParams: map[string][]string{"indices": {"0", "1", "127"}},
            expected:    []int{0, 1, 127},
            expectError: false,
        },
        {
            name:        "duplicate indices",
            queryParams: map[string][]string{"indices": {"0", "1", "0"}},
            expected:    []int{0, 1},
            expectError: false,
        },
        {
            name:        "invalid string index",
            queryParams: map[string][]string{"indices": {"abc"}},
            expected:    nil,
            expectError: true,
        },
        {
            name:        "negative index",
            queryParams: map[string][]string{"indices": {"-1"}},
            expected:    nil,
            expectError: true,
        },
        {
            name:        "index too large",
            queryParams: map[string][]string{"indices": {"128"}}, // 128 >= NumberOfColumns (128)
            expected:    nil,
            expectError: true,
        },
        {
            name:        "mixed valid and invalid",
            queryParams: map[string][]string{"indices": {"0", "abc", "1"}},
            expected:    nil,
            expectError: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            u, err := url.Parse("http://example.com/test")
            require.NoError(t, err)

            q := u.Query()
            for key, values := range tt.queryParams {
                for _, value := range values {
                    q.Add(key, value)
                }
            }
            u.RawQuery = q.Encode()

            result, err := parseDataColumnIndices(u)

            if tt.expectError {
                assert.NotNil(t, err)
            } else {
                require.NoError(t, err)
                assert.DeepEqual(t, tt.expected, result)
            }
        })
    }
}

func TestBuildDataColumnSidecarsSSZResponse(t *testing.T) {
    t.Run("empty data columns", func(t *testing.T) {
        result, err := buildDataColumnSidecarsSSZResponse([]blocks.VerifiedRODataColumn{})
        require.NoError(t, err)
        require.DeepEqual(t, []byte{}, result)
    })

    t.Run("get SSZ size", func(t *testing.T) {
        size := (&ethpb.DataColumnSidecar{}).SizeSSZ()
        assert.Equal(t, true, size > 0)
    })
}

@@ -20,4 +20,6 @@ type Server struct {
    ForkchoiceFetcher   blockchain.ForkchoiceFetcher
    FinalizationFetcher blockchain.FinalizationFetcher
    ChainInfoFetcher    blockchain.ChainInfoFetcher
    GenesisTimeFetcher  blockchain.TimeFetcher
    Blocker             lookup.Blocker
}

@@ -39,8 +39,8 @@ func HandleIsOptimisticError(w http.ResponseWriter, err error) {
        return
    }

    var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
    if errors.As(err, &blockRootsNotFoundErr) {
    var blockNotFoundErr *lookup.BlockNotFoundError
    if errors.As(err, &blockNotFoundErr) {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
        return
    }

@@ -138,7 +138,7 @@ func isStateRootOptimistic(
        return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
    }
    if !has {
        return true, lookup.NewBlockRootsNotFoundError()
        return true, lookup.NewBlockNotFoundError("no block roots returned from the database")
    }
    for _, r := range roots {
        b, err := database.Block(ctx, r)

@@ -286,8 +286,8 @@ func TestIsOptimistic(t *testing.T) {
        require.NoError(t, err)
        mf := &testutil.MockStater{BeaconState: st}
        _, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
        var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
        require.Equal(t, true, errors.As(err, &blockRootsNotFoundErr))
        var blockNotFoundErr *lookup.BlockNotFoundError
        require.Equal(t, true, errors.As(err, &blockNotFoundErr))
    })
})

@@ -395,11 +395,11 @@ func TestHandleIsOptimisticError(t *testing.T) {
    })
    t.Run("no block roots error handled as 404", func(t *testing.T) {
        rr := httptest.NewRecorder()
        blockRootsErr := lookup.NewBlockRootsNotFoundError()
        HandleIsOptimisticError(rr, blockRootsErr)
        blockNotFoundErr := lookup.NewBlockNotFoundError("no block roots returned from the database")
        HandleIsOptimisticError(rr, blockNotFoundErr)

        require.Equal(t, http.StatusNotFound, rr.Code)
        require.StringContains(t, blockRootsErr.Error(), rr.Body.String())
        require.StringContains(t, blockNotFoundErr.Error(), rr.Body.String())
    })

    t.Run("generic error handled as 500", func(t *testing.T) {

@@ -11,7 +11,8 @@ go_library(
    deps = [
        "//api:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -32,9 +33,11 @@ go_test(
    deps = [
        "//api/server/structs:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -90,10 +90,21 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
return
|
||||
}
|
||||
|
||||
if startPeriod*uint64(config.EpochsPerSyncCommitteePeriod) < uint64(config.AltairForkEpoch) {
|
||||
httputil.HandleError(w, "Invalid 'start_period': before Altair fork", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
endPeriod := startPeriod + count - 1
|
||||
|
||||
headBlock, err := s.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// get updates
|
||||
updates, err := s.LCStore.LightClientUpdates(ctx, startPeriod, endPeriod)
|
||||
updates, err := s.LCStore.LightClientUpdates(ctx, startPeriod, endPeriod, headBlock)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get light client updates from DB: "+err.Error(), http.StatusInternalServerError)
|
||||
return
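
// A minimal sketch (not part of the diff) of the period arithmetic behind the new
// guard above: a sync committee period spans EpochsPerSyncCommitteePeriod epochs,
// so a start period is pre-Altair exactly when its first epoch falls before
// AltairForkEpoch. The helper name is hypothetical; the config fields match the
// check in the handler.
func periodStartsBeforeAltair(startPeriod uint64) bool {
	cfg := params.BeaconConfig()
	return startPeriod*uint64(cfg.EpochsPerSyncCommitteePeriod) < uint64(cfg.AltairForkEpoch)
}

// On mainnet (AltairForkEpoch 74240, 256 epochs per period) this rejects any
// start_period below 290 with a 400, which is what the handler does above.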

@@ -2,6 +1,7 @@ package lightclient

import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
@@ -12,9 +13,11 @@ import (

"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/async/event"
blockchainTest "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -55,7 +58,8 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
require.NoError(t, err)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
require.NoError(t, err)

err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
require.NoError(t, err)
@@ -99,7 +103,8 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
require.NoError(t, err)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
require.NoError(t, err)

err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
require.NoError(t, err)
@@ -142,8 +147,9 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
}

t.Run("no bootstrap found", func(t *testing.T) {
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
s := &Server{
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
}
request := httptest.NewRequest("GET", "http://foo.com/", nil)
request.SetPathValue("block_root", hexutil.Encode([]byte{0x00, 0x01, 0x02}))
@@ -185,13 +191,24 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
updates = append(updates, update)
}

lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updatePeriod := startPeriod
for _, update := range updates {
err := s.LCStore.SaveLightClientUpdate(ctx, updatePeriod, update)
err := db.SaveLightClientUpdate(ctx, updatePeriod, update)
require.NoError(t, err)
updatePeriod++
}
@@ -326,20 +343,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
secondForkSlot := primitives.Slot(params.BeaconConfig().VersionToForkEpochMap()[testVersion+1] * primitives.Epoch(config.SlotsPerEpoch)).Add(1)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updates := make([]interfaces.LightClientUpdate, 2)

updatePeriod := firstForkSlot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
startPeriod := updatePeriod

var err error
updates[0], err = createUpdate(t, testVersion)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[0])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[0])
require.NoError(t, err)

updatePeriod = secondForkSlot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
@@ -347,7 +374,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
updates[1], err = createUpdate(t, testVersion+1)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[1])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[1])
require.NoError(t, err)

t.Run("json", func(t *testing.T) {
@@ -446,19 +473,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updates := make([]interfaces.LightClientUpdate, 3)

updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
var err error

for i := 0; i < 3; i++ {
updates[i], err = createUpdate(t, version.Altair)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
require.NoError(t, err)

updatePeriod++
@@ -493,20 +531,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updates := make([]interfaces.LightClientUpdate, 3)

updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))

var err error
for i := 0; i < 3; i++ {
updates[i], err = createUpdate(t, version.Altair)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
require.NoError(t, err)

updatePeriod++
@@ -536,10 +584,16 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
})

t.Run("start period before altair", func(t *testing.T) {
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
}

startPeriod := 0
url := fmt.Sprintf("http://foo.com/?count=128&start_period=%d", startPeriod)
request := httptest.NewRequest("GET", url, nil)
@@ -548,11 +602,10 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusOK, writer.Code)
var resp structs.LightClientUpdatesByRangeResponse
err := json.Unmarshal(writer.Body.Bytes(), &resp.Updates)
require.NoError(t, err)
require.Equal(t, 0, len(resp.Updates))
require.Equal(t, http.StatusBadRequest, writer.Code)

config.AltairForkEpoch = 0
params.OverrideBeaconConfig(config)
})

t.Run("missing updates", func(t *testing.T) {
@@ -560,15 +613,25 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

t.Run("missing update in the middle", func(t *testing.T) {
db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updates := make([]interfaces.LightClientUpdate, 3)

updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))

var err error
for i := 0; i < 3; i++ {
if i == 1 { // skip this update
updatePeriod++
@@ -577,7 +640,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
updates[i], err = createUpdate(t, version.Altair)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
require.NoError(t, err)

updatePeriod++
@@ -604,15 +667,25 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

t.Run("missing update at the beginning", func(t *testing.T) {
db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

s := &Server{
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
LCStore: lcStore,
HeadFetcher: &blockchainTest.ChainService{
Block: signedBlk,
},
}

saveHead(t, ctx, db)

updates := make([]interfaces.LightClientUpdate, 3)

updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))

var err error
for i := 0; i < 3; i++ {
if i == 0 { // skip this update
updatePeriod++
@@ -622,7 +695,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
updates[i], err = createUpdate(t, version.Altair)
require.NoError(t, err)

err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
require.NoError(t, err)

updatePeriod++
@@ -665,7 +738,12 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)

s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
require.NoError(t, err)

s := &Server{
LCStore: lcStore,
}
s.LCStore.SetLastFinalityUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
@@ -690,7 +768,12 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)

s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
require.NoError(t, err)

s := &Server{
LCStore: lcStore,
}
s.LCStore.SetLastFinalityUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
@@ -729,7 +812,11 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
helpers.ClearCache()

t.Run("no update", func(t *testing.T) {
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))

s := &Server{
LCStore: lcStore,
}

request := httptest.NewRequest("GET", "http://foo.com", nil)
writer := httptest.NewRecorder()
@@ -745,7 +832,12 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)

s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
require.NoError(t, err)

s := &Server{
LCStore: lcStore,
}
s.LCStore.SetLastOptimisticUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
@@ -769,7 +861,12 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)

s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
require.NoError(t, err)

s := &Server{
LCStore: lcStore,
}
s.LCStore.SetLastOptimisticUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
@@ -984,3 +1081,14 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {

return update, nil
}

func saveHead(t *testing.T, ctx context.Context, d db.Database) {
blk := util.NewBeaconBlock()
blkRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, d, blk)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, d.SaveState(ctx, st, blkRoot))
require.NoError(t, d.SaveHeadBlockRoot(ctx, blkRoot))
}

@@ -1,9 +1,11 @@
package lightclient

import (
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
)

type Server struct {
LCStore *lightClient.Store
LCStore *lightClient.Store
HeadFetcher blockchain.HeadFetcher
}

@@ -29,6 +29,11 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
var blockNotFoundErr *lookup.BlockNotFoundError
if errors.As(err, &blockNotFoundErr) {
httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
return false
}
var invalidBlockIdErr *lookup.BlockIdParseError
if errors.As(err, &invalidBlockIdErr) {
httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)

@@ -49,3 +49,59 @@ func TestWriteStateFetchError(t *testing.T) {
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
}
}

// TestWriteBlockFetchError tests the WriteBlockFetchError function
// to ensure that the correct error message and code are written to the response
// and that the function returns the correct boolean value.
func TestWriteBlockFetchError(t *testing.T) {
cases := []struct {
name string
err error
expectedMessage string
expectedCode int
expectedReturn bool
}{
{
name: "BlockNotFoundError should return 404",
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
expectedMessage: "Block not found",
expectedCode: http.StatusNotFound,
expectedReturn: false,
},
{
name: "BlockIdParseError should return 400",
err: &lookup.BlockIdParseError{},
expectedMessage: "Invalid block ID",
expectedCode: http.StatusBadRequest,
expectedReturn: false,
},
{
name: "Generic error should return 500",
err: errors.New("database connection failed"),
expectedMessage: "Could not get block from block ID",
expectedCode: http.StatusInternalServerError,
expectedReturn: false,
},
{
name: "Nil block should return 404",
err: nil,
expectedMessage: "Could not find requested block",
expectedCode: http.StatusNotFound,
expectedReturn: false,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
writer := httptest.NewRecorder()
result := WriteBlockFetchError(writer, nil, c.err)

assert.Equal(t, c.expectedReturn, result, "incorrect return value")
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")

e := &httputil.DefaultJsonError{}
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
})
}
}

@@ -14,6 +14,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/options:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -44,6 +45,7 @@ go_test(
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/options:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -23,17 +24,19 @@ import (
"github.com/pkg/errors"
)

type BlockRootsNotFoundError struct {
// BlockNotFoundError represents an error when a block cannot be found.
type BlockNotFoundError struct {
message string
}

func NewBlockRootsNotFoundError() *BlockRootsNotFoundError {
return &BlockRootsNotFoundError{
message: "no block roots returned from the database",
// NewBlockNotFoundError creates a new BlockNotFoundError with a custom message.
func NewBlockNotFoundError(msg string) *BlockNotFoundError {
return &BlockNotFoundError{
message: msg,
}
}

func (e BlockRootsNotFoundError) Error() string {
func (e *BlockNotFoundError) Error() string {
return e.message
}
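
// A minimal sketch (not part of the diff) showing why callers now match this
// error with a pointer target: Error() is declared on *BlockNotFoundError, so
// only the pointer type satisfies the error interface, and errors.As must be
// handed a **BlockNotFoundError, exactly as the updated handlers in this diff do.
func isBlockNotFound(err error) bool {
	var notFound *BlockNotFoundError
	return errors.As(err, &notFound)
}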

@@ -57,7 +60,8 @@ func (e BlockIdParseError) Error() string {
// Blocker is responsible for retrieving blocks.
type Blocker interface {
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError)
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
}
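
// A minimal sketch (not part of the diff) of the functional-options pattern the
// new Blobs signature adopts: each options.BlobsOption mutates an
// options.BlobsConfig, so callers can pass indices, versioned hashes, or nothing.
// The Blobs implementation further down reads cfg.Indices and cfg.VersionedHashes,
// and WithIndices appears in the updated tests; any other names here are assumptions.
type BlobsConfig struct {
	Indices         []int
	VersionedHashes [][]byte
}

type BlobsOption func(*BlobsConfig)

func WithIndices(indices []int) BlobsOption {
	return func(cfg *BlobsConfig) { cfg.Indices = indices }
}

// Usage: blocker.Blobs(ctx, "head", options.WithIndices([]int{0, 2}))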

// BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
@@ -69,6 +73,141 @@ type BeaconDbBlocker struct {
DataColumnStorage *filesystem.DataColumnStorage
}

// resolveBlockIDByRootOrSlot resolves a block ID that is either a root (hex string or raw bytes) or a slot number.
func (p *BeaconDbBlocker) resolveBlockIDByRootOrSlot(ctx context.Context, id string) ([fieldparams.RootLength]byte, interfaces.ReadOnlySignedBeaconBlock, error) {
var rootSlice []byte

// Check if it's a hex-encoded root
if bytesutil.IsHex([]byte(id)) {
var err error
rootSlice, err = bytesutil.DecodeHexWithLength(id, fieldparams.RootLength)
if err != nil {
e := NewBlockIdParseError(err)
return [32]byte{}, nil, &e
}
} else if len(id) == 32 {
// Handle raw 32-byte root
rootSlice = []byte(id)
} else {
// Try to parse as slot number
slot, err := strconv.ParseUint(id, 10, 64)
if err != nil {
e := NewBlockIdParseError(err)
return [32]byte{}, nil, &e
}

// Get block roots for the slot
ok, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
}
if !ok || len(roots) == 0 {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("no blocks found at slot %d", slot))
}

// Find the canonical block root
if p.ChainInfoFetcher == nil {
return [32]byte{}, nil, errors.New("chain info fetcher is not configured")
}

for _, root := range roots {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, root)
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "could not determine if block root is canonical")
}
if canonical {
rootSlice = root[:]
break
}
}

// If no canonical block found, rootSlice remains nil
if rootSlice == nil {
// No canonical block at this slot
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("no canonical block found at slot %d", slot))
}
}

// Fetch the block using the root
root := bytesutil.ToBytes32(rootSlice)
blk, err := p.BeaconDB.Block(ctx, root)
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice)
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("block %#x not found in db", rootSlice))
}

return root, blk, nil
}

// resolveBlockID resolves a block ID to root and signed block.
// Fork validation is handled outside this function by the calling methods.
func (p *BeaconDbBlocker) resolveBlockID(ctx context.Context, id string) ([fieldparams.RootLength]byte, interfaces.ReadOnlySignedBeaconBlock, error) {
switch id {
case "genesis":
blk, err := p.BeaconDB.GenesisBlock(ctx)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve genesis block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError("genesis block not found")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not get genesis block root")
}
return root, blk, nil

case "head":
blk, err := p.ChainInfoFetcher.HeadBlock(ctx)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve head block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError("head block not found")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not get head block root")
}
return root, blk, nil

case "finalized":
finalized := p.ChainInfoFetcher.FinalizedCheckpt()
if finalized == nil {
return [32]byte{}, nil, errors.New("received nil finalized checkpoint")
}
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err := p.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve finalized block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("finalized block %#x not found", finalizedRoot))
}
return finalizedRoot, blk, nil

case "justified":
jcp := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
if jcp == nil {
return [32]byte{}, nil, errors.New("received nil justified checkpoint")
}
justifiedRoot := bytesutil.ToBytes32(jcp.Root)
blk, err := p.BeaconDB.Block(ctx, justifiedRoot)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve justified block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("justified block %#x not found", justifiedRoot))
}
return justifiedRoot, blk, nil

default:
return p.resolveBlockIDByRootOrSlot(ctx, id)
}
}

// Block returns the beacon block for a given identifier. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
@@ -78,76 +217,16 @@ type BeaconDbBlocker struct {
// - <hex encoded block root with '0x' prefix>
// - <block root>
func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
var err error
var blk interfaces.ReadOnlySignedBeaconBlock
switch string(id) {
case "head":
blk, err = p.ChainInfoFetcher.HeadBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve head block")
}
case "finalized":
finalized := p.ChainInfoFetcher.FinalizedCheckpt()
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err = p.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return nil, errors.New("could not get finalized block from db")
}
case "genesis":
blk, err = p.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve genesis block")
}
default:
if bytesutil.IsHex(id) {
decoded, err := hexutil.Decode(string(id))
if err != nil {
e := NewBlockIdParseError(err)
return nil, &e
}
blk, err = p.BeaconDB.Block(ctx, bytesutil.ToBytes32(decoded))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block")
}
} else if len(id) == 32 {
blk, err = p.BeaconDB.Block(ctx, bytesutil.ToBytes32(id))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block")
}
} else {
slot, err := strconv.ParseUint(string(id), 10, 64)
if err != nil {
e := NewBlockIdParseError(err)
return nil, &e
}
blks, err := p.BeaconDB.BlocksBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve blocks for slot %d", slot)
}
_, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
}
numBlks := len(blks)
if numBlks == 0 {
return nil, nil
}
for i, b := range blks {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, roots[i])
if err != nil {
return nil, errors.Wrapf(err, "could not determine if block root is canonical")
}
if canonical {
blk = b
break
}
}
}
_, blk, err := p.resolveBlockID(ctx, string(id))
if err != nil {
return nil, err
}
return blk, nil
}
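
// A minimal sketch (not part of the diff) exercising the identifier forms that
// Block now routes through resolveBlockID, per the doc comment above. The
// blocker argument is assumed to be a configured *BeaconDbBlocker and the root
// value is illustrative.
func fetchExamples(ctx context.Context, blocker *BeaconDbBlocker, root [32]byte) error {
	for _, id := range [][]byte{
		[]byte("head"),                  // named checkpoint
		[]byte("genesis"),               // genesis block
		[]byte("12345"),                 // slot number
		[]byte(hexutil.Encode(root[:])), // 0x-prefixed hex root
	} {
		if _, err := blocker.Block(ctx, id); err != nil {
			return err
		}
	}
	return nil
}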

// Blobs returns the blobs for a given block id identifier and blob indices. The identifier can be one of:
// Blobs returns the fetched blobs for a given block ID with configurable options.
// Options can specify either blob indices or versioned hashes for retrieval.
// The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
@@ -158,97 +237,46 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
//
// cases:
// - no block, 404
// - block exists, no commitment, 200 w/ empty list
// - block exists, has commitments, inside retention period (greater of protocol- or user-specified) serve them w/ 200 unless we hit an error reading them.
// we are technically not supposed to import a block to forkchoice unless we have the blobs, so the nuance here is if we can't find the file and we are inside the protocol-defined retention period, then it's actually a 500.
// - block exists, has commitments, outside retention period (greater of protocol- or user-specified) - ie just like block exists, no commitment
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
var rootSlice []byte
switch id {
case "genesis":
return nil, &core.RpcError{Err: errors.New("blobs are not supported for Phase 0 fork"), Reason: core.BadRequest}
case "head":
var err error
rootSlice, err = p.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve head root"), Reason: core.Internal}
}
case "finalized":
fcp := p.ChainInfoFetcher.FinalizedCheckpt()
if fcp == nil {
return nil, &core.RpcError{Err: errors.New("received nil finalized checkpoint"), Reason: core.Internal}
}
rootSlice = fcp.Root
case "justified":
jcp := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
if jcp == nil {
return nil, &core.RpcError{Err: errors.New("received nil justified checkpoint"), Reason: core.Internal}
}
rootSlice = jcp.Root
default:
if bytesutil.IsHex([]byte(id)) {
var err error
rootSlice, err = bytesutil.DecodeHexWithLength(id, fieldparams.RootLength)
if err != nil {
return nil, &core.RpcError{Err: NewBlockIdParseError(err), Reason: core.BadRequest}
}
} else {
slot, err := strconv.ParseUint(id, 10, 64)
if err != nil {
return nil, &core.RpcError{Err: NewBlockIdParseError(err), Reason: core.BadRequest}
}
denebStart, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Deneb start slot"), Reason: core.Internal}
}
if primitives.Slot(slot) < denebStart {
return nil, &core.RpcError{Err: errors.New("blobs are not supported before Deneb fork"), Reason: core.BadRequest}
}
ok, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if !ok {
return nil, &core.RpcError{Err: fmt.Errorf("no block roots at slot %d", slot), Reason: core.NotFound}
}
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to get block roots for slot %d", slot), Reason: core.Internal}
}
rootSlice = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not determine if block %#x is canonical", blockRoot), Reason: core.Internal}
}
if canonical {
rootSlice = blockRoot[:]
break
}
}
}
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
// Apply options
cfg := &options.BlobsConfig{}
for _, opt := range opts {
opt(cfg)
}

root := bytesutil.ToBytes32(rootSlice)
// Check for genesis block first (not supported for blobs)
if id == "genesis" {
return nil, &core.RpcError{Err: errors.New("not supported for Phase 0 fork"), Reason: core.BadRequest}
}

roSignedBlock, err := p.BeaconDB.Block(ctx, root)
// Resolve block ID to root and block
root, roSignedBlock, err := p.resolveBlockID(ctx, id)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
var blockNotFound *BlockNotFoundError
var blockIdParseErr *BlockIdParseError

reason := core.Internal // Default to Internal for unexpected errors
if errors.As(err, &blockNotFound) {
reason = core.NotFound
} else if errors.As(err, &blockIdParseErr) {
reason = core.BadRequest
}
return nil, &core.RpcError{Err: err, Reason: core.ErrorReason(reason)}
}

if roSignedBlock == nil {
return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
}

// If block is not in the retention window, return 200 w/ empty list
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
return make([]*blocks.VerifiedROBlob, 0), nil
slot := roSignedBlock.Block().Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch {
return nil, &core.RpcError{Err: errors.New("blobs are not supported before Deneb fork"), Reason: core.BadRequest}
}

roBlock := roSignedBlock.Block()

commitments, err := roBlock.Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", root), Reason: core.Internal}
}

// If there are no commitments return 200 w/ empty list
@@ -262,7 +290,47 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
}

// Convert versioned hashes to indices if provided
indices := cfg.Indices
if len(cfg.VersionedHashes) > 0 {
// Build a map of requested versioned hashes for fast lookup and tracking
requestedHashes := make(map[string]bool)
for _, versionedHash := range cfg.VersionedHashes {
requestedHashes[string(versionedHash)] = true
}

// Create indices array and track which hashes we found
indices = make([]int, 0, len(cfg.VersionedHashes))
foundHashes := make(map[string]bool)

for i, commitment := range commitments {
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(commitment)
hashStr := string(versionedHash[:])
if requestedHashes[hashStr] {
indices = append(indices, i)
foundHashes[hashStr] = true
}
}

// Check if all requested hashes were found
if len(indices) != len(cfg.VersionedHashes) {
// Collect missing hashes
missingHashes := make([]string, 0, len(cfg.VersionedHashes)-len(indices))
for _, requestedHash := range cfg.VersionedHashes {
if !foundHashes[string(requestedHash)] {
missingHashes = append(missingHashes, hexutil.Encode(requestedHash))
}
}

// Create detailed error message
errMsg := fmt.Sprintf("versioned hash(es) not found in block (requested %d hashes, found %d, missing: %v)",
len(cfg.VersionedHashes), len(indices), missingHashes)

return nil, &core.RpcError{Err: errors.New(errMsg), Reason: core.NotFound}
}
}
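
// A minimal sketch (not part of the diff) of the commitment-to-versioned-hash
// mapping that ConvertKzgCommitmentToVersionedHash performs per EIP-4844: the
// SHA-256 of the KZG commitment with the first byte replaced by the 0x01 version
// prefix. This is a standalone illustration (crypto/sha256), not the Prysm
// implementation itself.
func kzgToVersionedHash(commitment []byte) [32]byte {
	hash := sha256.Sum256(commitment)
	hash[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return hash
}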

@@ -417,3 +485,93 @@ func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]

return verifiedRoSidecars, nil
}

// DataColumns returns the data column sidecars for a given block id identifier and column indices. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
// - "justified"
// - <slot>
// - <hex encoded block root with '0x' prefix>
// - <block root>
//
// cases:
// - no block, 404
// - block exists, before Fulu fork, 400 (data columns are not supported before Fulu fork)
func (p *BeaconDbBlocker) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
// Check for genesis block first (not supported for data columns)
if id == "genesis" {
return nil, &core.RpcError{Err: errors.New("data columns are not supported for Phase 0 fork"), Reason: core.BadRequest}
}

// Resolve block ID to root and block
root, roSignedBlock, err := p.resolveBlockID(ctx, id)
if err != nil {
var blockNotFound *BlockNotFoundError
var blockIdParseErr *BlockIdParseError

reason := core.Internal // Default to Internal for unexpected errors
if errors.As(err, &blockNotFound) {
reason = core.NotFound
} else if errors.As(err, &blockIdParseErr) {
reason = core.BadRequest
}
return nil, &core.RpcError{Err: err, Reason: core.ErrorReason(reason)}
}

slot := roSignedBlock.Block().Slot()
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
fuluForkSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
if slot < fuluForkSlot {
return nil, &core.RpcError{Err: errors.New("data columns are not supported before Fulu fork"), Reason: core.BadRequest}
}

roBlock := roSignedBlock.Block()

commitments, err := roBlock.Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", root), Reason: core.Internal}
}

// If there are no commitments return 200 w/ empty list
if len(commitments) == 0 {
return make([]blocks.VerifiedRODataColumn, 0), nil
}

// Get column indices to retrieve
columnIndices := make([]uint64, 0)
if len(indices) == 0 {
// If no indices specified, return all columns this node is custodying
summary := p.DataColumnStorage.Summary(root)
stored := summary.Stored()
for index := range stored {
columnIndices = append(columnIndices, index)
}
} else {
// Validate and convert indices
numberOfColumns := params.BeaconConfig().NumberOfColumns
for _, index := range indices {
if index < 0 || uint64(index) >= numberOfColumns {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d is outside valid range [0, %d)", index, numberOfColumns),
Reason: core.BadRequest,
}
}
columnIndices = append(columnIndices, uint64(index))
}
}

// Retrieve data column sidecars from storage
verifiedRoDataColumns, err := p.DataColumnStorage.Get(root, columnIndices)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrapf(err, "could not retrieve data columns for block root %#x", root),
Reason: core.Internal,
}
}

return verifiedRoDataColumns, nil
}
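
// A minimal sketch (not part of the diff) of calling DataColumns: passing no
// indices returns every column the node custodies for the block, while explicit
// indices must fall in [0, NumberOfColumns). The blocker argument is assumed to
// be a configured *BeaconDbBlocker.
func fetchColumns(ctx context.Context, blocker *BeaconDbBlocker) error {
	// All custodied columns for the head block.
	if _, rpcErr := blocker.DataColumns(ctx, "head", nil); rpcErr != nil {
		return rpcErr.Err
	}
	// A specific subset; out-of-range indices yield a BadRequest error.
	if _, rpcErr := blocker.DataColumns(ctx, "head", []int{0, 1, 2}); rpcErr != nil {
		return rpcErr.Err
	}
	return nil
}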
|
||||
|
||||
@@ -14,11 +14,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -59,11 +61,12 @@ func TestGetBlock(t *testing.T) {
|
||||
fetcher := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: blkContainers[32].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -106,6 +109,11 @@ func TestGetBlock(t *testing.T) {
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "justified",
|
||||
blockID: []byte("justified"),
|
||||
want: blkContainers[32].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
@@ -160,6 +168,112 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobsErrorHandling(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
t.Run("non-existent block by root returns 404", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "not found", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("non-existent block by slot returns 404", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
ChainInfoFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "999999")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "no blocks found at slot", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("genesis block not found returns 404", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
// Note: genesis blocks don't support blobs, so this returns BadRequest
|
||||
_, rpcErr := blocker.Blobs(ctx, "genesis")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
|
||||
require.StringContains(t, "not supported for Phase 0", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("finalized block not found returns 404", func(t *testing.T) {
|
||||
// Set up a finalized checkpoint pointing to a non-existent block
|
||||
nonExistentRoot := bytesutil.PadTo([]byte("nonexistent"), 32)
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: nonExistentRoot},
|
||||
},
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "finalized")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "finalized block", rpcErr.Err.Error())
|
||||
require.StringContains(t, "not found", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("justified block not found returns 404", func(t *testing.T) {
|
||||
// Set up a justified checkpoint pointing to a non-existent block
|
||||
nonExistentRoot := bytesutil.PadTo([]byte("nonexistent2"), 32)
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: nonExistentRoot},
|
||||
},
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "justified")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "justified block", rpcErr.Err.Error())
|
||||
require.StringContains(t, "not found", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("invalid block ID returns 400", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "invalid-hex")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
|
||||
require.StringContains(t, "could not parse block ID", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("database error returns 500", func(t *testing.T) {
|
||||
// Create a pre-Deneb block with valid slot
|
||||
predenebBlock := util.NewBeaconBlock()
|
||||
predenebBlock.Block.Slot = 100
|
||||
util.SaveBlock(t, ctx, db, predenebBlock)
|
||||
|
||||
// Create blocker without ChainInfoFetcher to trigger internal error when checking canonical status
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "100")
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.Internal), rpcErr.Reason)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetBlob(t *testing.T) {
|
||||
const (
|
||||
slot = 123
|
||||
@@ -215,21 +329,18 @@ func TestGetBlob(t *testing.T) {
|
||||
for _, blob := range fuluBlobSidecars {
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob.Blob)
|
||||
cellsAndProogs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
cellsAndProofsList = append(cellsAndProofsList, cellsAndProogs)
|
||||
cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
|
||||
}
|
||||
|
||||
dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
|
||||
for _, sidecarPb := range dataColumnSidecarPb {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
|
||||
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
|
||||
for _, roDataColumnSidecar := range roDataColumnSidecars {
|
||||
verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
|
||||
}
|
||||
|
||||
err = db.SaveBlock(t.Context(), fuluBlock)
|
||||
@@ -239,16 +350,19 @@ func TestGetBlob(t *testing.T) {
 		setupDeneb(t)
 
 		blocker := &BeaconDbBlocker{}
-		_, rpcErr := blocker.Blobs(ctx, "genesis", nil)
+		_, rpcErr := blocker.Blobs(ctx, "genesis")
 		require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
-		require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
+		require.StringContains(t, "not supported for Phase 0 fork", rpcErr.Err.Error())
 	})
 
 	t.Run("head", func(t *testing.T) {
 		setupDeneb(t)
 
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{Root: denebBlockRoot[:]},
+			ChainInfoFetcher: &mockChain.ChainService{
+				Root:  denebBlockRoot[:],
+				Block: denebBlock,
+			},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
@@ -256,7 +370,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head", nil)
+		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head")
 		require.IsNil(t, rpcErr)
 		require.Equal(t, blobCount, len(retrievedVerifiedSidecars))
 
@@ -285,7 +399,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized", nil)
+		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized")
 		require.IsNil(t, rpcErr)
 		require.Equal(t, blobCount, len(verifiedSidecars))
 	})
@@ -302,7 +416,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified", nil)
+		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified")
 		require.IsNil(t, rpcErr)
 		require.Equal(t, blobCount, len(verifiedSidecars))
 	})
@@ -318,7 +432,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]), nil)
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]))
 		require.IsNil(t, rpcErr)
 		require.Equal(t, blobCount, len(verifiedBlobs))
 	})
@@ -327,6 +441,7 @@ func TestGetBlob(t *testing.T) {
 		setupDeneb(t)
 
 		blocker := &BeaconDbBlocker{
+			ChainInfoFetcher: &mockChain.ChainService{},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
@@ -334,7 +449,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
 		require.IsNil(t, rpcErr)
 		require.Equal(t, blobCount, len(verifiedBlobs))
 	})
@@ -353,7 +468,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: blobStorage,
 		}
 
-		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", []int{index})
+		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
 		require.IsNil(t, rpcErr)
 		require.Equal(t, 1, len(retrievedVerifiedSidecars))
 
@@ -379,7 +494,7 @@ func TestGetBlob(t *testing.T) {
 			BlobStorage: filesystem.NewEphemeralBlobStorage(t),
 		}
 
-		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
 		require.IsNil(t, rpcErr)
 		require.Equal(t, 0, len(verifiedBlobs))
 	})
@@ -397,7 +512,7 @@ func TestGetBlob(t *testing.T) {
 		}
 
 		noBlobIndex := len(storedBlobSidecars) + 1
-		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
+		_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
 		require.NotNil(t, rpcErr)
 		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
 	})
@@ -413,7 +528,7 @@ func TestGetBlob(t *testing.T) {
 			BeaconDB:    db,
 			BlobStorage: blobStorage,
 		}
-		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
+		_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
 		require.NotNil(t, rpcErr)
 		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
 	})
@@ -434,7 +549,7 @@ func TestGetBlob(t *testing.T) {
 			DataColumnStorage: dataColumnStorage,
 		}
 
-		_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
 		require.NotNil(t, rpcErr)
 		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
 	})
@@ -455,7 +570,7 @@ func TestGetBlob(t *testing.T) {
 			DataColumnStorage: dataColumnStorage,
 		}
 
-		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
 		require.IsNil(t, rpcErr)
 		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
 
@@ -482,7 +597,7 @@ func TestGetBlob(t *testing.T) {
 			DataColumnStorage: dataColumnStorage,
 		}
 
-		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
 		require.IsNil(t, rpcErr)
 		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
 
@@ -492,4 +607,487 @@ func TestGetBlob(t *testing.T) {
 			require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
 		}
 	})
+
+	t.Run("pre-deneb block should return 400", func(t *testing.T) {
+		// Set up with the Deneb fork at epoch 1, so slot 0 is before Deneb.
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig().Copy()
+		cfg.DenebForkEpoch = 1
+		params.OverrideBeaconConfig(cfg)
+
+		// Create a pre-Deneb block (slot 0, which is epoch 0).
+		predenebBlock := util.NewBeaconBlock()
+		predenebBlock.Block.Slot = 0
+		util.SaveBlock(t, ctx, db, predenebBlock)
+		predenebBlockRoot, err := predenebBlock.Block.HashTreeRoot()
+		require.NoError(t, err)
+
+		blocker := &BeaconDbBlocker{
+			BeaconDB: db,
+		}
+
+		_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(predenebBlockRoot[:]))
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
+		require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
+		require.StringContains(t, "not supported before", rpcErr.Err.Error())
+	})
 }
+
+func TestBlobs_CommitmentOrdering(t *testing.T) {
+	// Set up the Fulu fork configuration.
+	params.SetupTestConfigCleanup(t)
+	cfg := params.BeaconConfig().Copy()
+	cfg.DenebForkEpoch = 1
+	cfg.FuluForkEpoch = 2
+	params.OverrideBeaconConfig(cfg)
+
+	beaconDB := testDB.SetupDB(t)
+	ctx := t.Context()
+
+	// Start the trusted setup for KZG.
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	// Create a Fulu/Electra block with multiple blob commitments.
+	fuluForkSlot := primitives.Slot(cfg.FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
+	parent := [32]byte{}
+	fuluBlock, fuluBlobs := util.GenerateTestElectraBlockWithSidecar(t, parent, fuluForkSlot, 3)
+
+	// Save the block.
+	err = beaconDB.SaveBlock(ctx, fuluBlock)
+	require.NoError(t, err)
+	fuluBlockRoot := fuluBlock.Root()
+
+	// Get the commitments from the generated block.
+	commitments, err := fuluBlock.Block().Body().BlobKzgCommitments()
+	require.NoError(t, err)
+	require.Equal(t, 3, len(commitments))
+
+	// Convert blob sidecars to data column sidecars for Fulu.
+	cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobs))
+	for _, blob := range fuluBlobs {
+		var kzgBlob kzg.Blob
+		copy(kzgBlob[:], blob.Blob)
+		cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
+		require.NoError(t, err)
+		cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
+	}
+
+	dataColumnSidecarPb, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
+	require.NoError(t, err)
+
+	verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
+	for _, roDataColumn := range dataColumnSidecarPb {
+		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
+		verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
+	}
+
+	// Set up data column storage and save the data columns.
+	_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
+	err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
+	require.NoError(t, err)
+
+	// Set up the blocker.
+	chainService := &mockChain.ChainService{
+		Genesis: time.Now(),
+		FinalizedCheckPoint: &ethpb.Checkpoint{
+			Epoch: 0,
+			Root:  fuluBlockRoot[:],
+		},
+	}
+	blocker := &BeaconDbBlocker{
+		BeaconDB:           beaconDB,
+		ChainInfoFetcher:   chainService,
+		GenesisTimeFetcher: chainService,
+		BlobStorage:        filesystem.NewEphemeralBlobStorage(t),
+		DataColumnStorage:  dataColumnStorage,
+	}
+
+	// Compute versioned hashes for the commitments in their block order.
+	hash0 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[0])
+	hash1 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[1])
+	hash2 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[2])
+
+	t.Run("blobs returned in commitment order regardless of request order", func(t *testing.T) {
+		// Request versioned hashes in reverse order: 2, 1, 0.
+		requestedHashes := [][]byte{hash2[:], hash1[:], hash0[:]}
+
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
+		if rpcErr != nil {
+			t.Errorf("RPC Error: %v (reason: %v)", rpcErr.Err, rpcErr.Reason)
+			return
+		}
+		require.Equal(t, 3, len(verifiedBlobs))
+
+		// Verify blobs are returned in the commitment order from the block (0, 1, 2).
+		// In Fulu, blobs are reconstructed from data columns.
+		assert.Equal(t, uint64(0), verifiedBlobs[0].Index) // First commitment in block
+		assert.Equal(t, uint64(1), verifiedBlobs[1].Index) // Second commitment in block
+		assert.Equal(t, uint64(2), verifiedBlobs[2].Index) // Third commitment in block
+
+		// Verify the blob content matches what we expect.
+		for i, verifiedBlob := range verifiedBlobs {
+			require.NotNil(t, verifiedBlob.BlobSidecar)
+			require.DeepEqual(t, fuluBlobs[i].Blob, verifiedBlob.Blob)
+			require.DeepEqual(t, fuluBlobs[i].KzgCommitment, verifiedBlob.KzgCommitment)
+		}
+	})
+
+	t.Run("subset of blobs maintains commitment order", func(t *testing.T) {
+		// Request hashes for indices 1 and 0 (out of order).
+		requestedHashes := [][]byte{hash1[:], hash0[:]}
+
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
+		if rpcErr != nil {
+			t.Errorf("RPC Error: %v (reason: %v)", rpcErr.Err, rpcErr.Reason)
+			return
+		}
+		require.Equal(t, 2, len(verifiedBlobs))
+
+		// Verify blobs are returned in the commitment order from the block.
+		assert.Equal(t, uint64(0), verifiedBlobs[0].Index) // First commitment in block
+		assert.Equal(t, uint64(1), verifiedBlobs[1].Index) // Second commitment in block
+
+		// Verify the blob content matches what we expect.
+		require.DeepEqual(t, fuluBlobs[0].Blob, verifiedBlobs[0].Blob)
+		require.DeepEqual(t, fuluBlobs[1].Blob, verifiedBlobs[1].Blob)
+	})
+
+	t.Run("request non-existent hash", func(t *testing.T) {
+		// Create a fake versioned hash.
+		fakeHash := make([]byte, 32)
+		for i := 0; i < 32; i++ {
+			fakeHash[i] = 0xFF
+		}
+
+		// Request only the fake hash.
+		requestedHashes := [][]byte{fakeHash}
+
+		_, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
+		require.StringContains(t, "versioned hash(es) not found in block", rpcErr.Err.Error())
+		require.StringContains(t, "requested 1 hashes, found 0", rpcErr.Err.Error())
+		require.StringContains(t, "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", rpcErr.Err.Error())
+	})
+
+	t.Run("request multiple non-existent hashes", func(t *testing.T) {
+		// Create two fake versioned hashes.
+		fakeHash1 := make([]byte, 32)
+		fakeHash2 := make([]byte, 32)
+		for i := 0; i < 32; i++ {
+			fakeHash1[i] = 0xAA
+			fakeHash2[i] = 0xBB
+		}
+
+		// Request a valid hash along with the two fake hashes.
+		requestedHashes := [][]byte{fakeHash1, hash0[:], fakeHash2}
+
+		_, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
+		require.StringContains(t, "versioned hash(es) not found in block", rpcErr.Err.Error())
+		require.StringContains(t, "requested 3 hashes, found 1", rpcErr.Err.Error())
+		// Check that both missing hashes are reported.
+		require.StringContains(t, "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", rpcErr.Err.Error())
+		require.StringContains(t, "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", rpcErr.Err.Error())
+	})
+}
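The ordering contract these assertions pin down is worth stating on its own: requested versioned hashes only select blobs, but results always come back in the order the commitments appear in the block, never in request order. A self-contained sketch of that selection logic, using the EIP-4844 versioned-hash construction (version byte 0x01 over a SHA-256 of the commitment); this is illustrative only, not the code under test:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// kzgToVersionedHash follows EIP-4844: sha256(commitment) with the first
// byte replaced by the version tag 0x01.
func kzgToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01
	return h
}

// orderedIndices returns the indices of the requested hashes sorted into
// block commitment order, plus any requested hashes that were not found.
func orderedIndices(commitments, requested [][]byte) (found []int, missing [][32]byte) {
	want := make(map[[32]byte]bool, len(requested))
	for _, r := range requested {
		var h [32]byte
		copy(h[:], r)
		want[h] = true
	}
	// Iterating commitments in block order makes the result independent of
	// the request order, matching the assertions above.
	for i, c := range commitments {
		vh := kzgToVersionedHash(c)
		if want[vh] {
			found = append(found, i)
			delete(want, vh)
		}
	}
	for h := range want {
		missing = append(missing, h)
	}
	return found, missing
}

func main() {
	commitments := [][]byte{[]byte("c0"), []byte("c1"), []byte("c2")}
	// Request hashes for indices 2 and 0, deliberately out of order.
	var req [][]byte
	for _, i := range []int{2, 0} {
		h := kzgToVersionedHash(commitments[i])
		req = append(req, h[:])
	}
	found, missing := orderedIndices(commitments, req)
	fmt.Println(found, len(missing)) // [0 2] 0
}
```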
+
+func TestGetDataColumns(t *testing.T) {
+	const (
+		blobCount     = 4
+		fuluForkEpoch = 2
+	)
+
+	setupFulu := func(t *testing.T) {
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig().Copy()
+		cfg.DenebForkEpoch = 1
+		cfg.FuluForkEpoch = fuluForkEpoch
+		params.OverrideBeaconConfig(cfg)
+	}
+
+	setupPreFulu := func(t *testing.T) {
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig().Copy()
+		cfg.FuluForkEpoch = 1000 // Set to a high epoch to ensure we're before Fulu.
+		cfg.DenebForkEpoch = 1
+		params.OverrideBeaconConfig(cfg)
+	}
+
+	ctx := t.Context()
+	db := testDB.SetupDB(t)
+
+	// Start the trusted setup.
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	// Create a Fulu block and convert blob sidecars to data column sidecars.
+	fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
+	fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
+	fuluBlockRoot := fuluBlock.Root()
+
+	cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
+	for _, blob := range fuluBlobSidecars {
+		var kzgBlob kzg.Blob
+		copy(kzgBlob[:], blob.Blob)
+		cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
+		require.NoError(t, err)
+		cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
+	}
+
+	roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
+	require.NoError(t, err)
+
+	verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
+	for _, roDataColumn := range roDataColumnSidecars {
+		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
+		verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
+	}
+
+	err = db.SaveBlock(t.Context(), fuluBlock)
+	require.NoError(t, err)
+
+	_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
+	err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
+	require.NoError(t, err)
+
+	t.Run("pre-fulu fork", func(t *testing.T) {
+		setupPreFulu(t)
+
+		// Create a block at slot 123 (before the Fulu fork, since setupPreFulu pushes FuluForkEpoch far into the future).
+		preFuluBlock := util.NewBeaconBlock()
+		preFuluBlock.Block.Slot = 123
+		util.SaveBlock(t, ctx, db, preFuluBlock)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			ChainInfoFetcher: &mockChain.ChainService{},
+			BeaconDB:         db,
+		}
+
+		_, rpcErr := blocker.DataColumns(ctx, "123", nil)
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
+		require.StringContains(t, "not supported before Fulu fork", rpcErr.Err.Error())
+	})
+
+	t.Run("genesis", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			ChainInfoFetcher: &mockChain.ChainService{},
+		}
+
+		_, rpcErr := blocker.DataColumns(ctx, "genesis", nil)
+		require.NotNil(t, rpcErr)
+		require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
+		require.StringContains(t, "not supported for Phase 0 fork", rpcErr.Err.Error())
+	})
+
+	t.Run("head", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			ChainInfoFetcher: &mockChain.ChainService{
+				Root:  fuluBlockRoot[:],
+				Block: fuluBlock,
+			},
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "head", nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
+
+		// Create a map of expected indices for easier verification.
+		expectedIndices := make(map[uint64]bool)
+		for _, expected := range verifiedRoDataColumnSidecars {
+			expectedIndices[expected.RODataColumn.DataColumnSidecar.Index] = true
+		}
+
+		// Verify we got data columns with the expected indices.
+		for _, actual := range retrievedDataColumns {
+			require.Equal(t, true, expectedIndices[actual.RODataColumn.DataColumnSidecar.Index])
+		}
+	})
+
+	t.Run("finalized", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: fuluBlockRoot[:]}},
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "finalized", nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
+	})
+
+	t.Run("justified", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: fuluBlockRoot[:]}},
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "justified", nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
+	})
+
+	t.Run("root", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
+	})
+
+	t.Run("slot", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			ChainInfoFetcher:  &mockChain.ChainService{},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		slotStr := fmt.Sprintf("%d", fuluForkSlot)
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, slotStr, nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
+	})
+
+	t.Run("specific indices", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		// Request specific indices (the first 3 data columns).
+		indices := []int{0, 1, 2}
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), indices)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, 3, len(retrievedDataColumns))
+
+		for i, dataColumn := range retrievedDataColumns {
+			require.Equal(t, uint64(indices[i]), dataColumn.RODataColumn.DataColumnSidecar.Index)
+		}
+	})
+
+	t.Run("no data columns returns empty array", func(t *testing.T) {
+		setupFulu(t)
+
+		_, emptyDataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: emptyDataColumnStorage,
+		}
+
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, 0, len(retrievedDataColumns))
+	})
+
+	t.Run("index too big", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		_, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), []int{0, math.MaxInt})
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
+	})
+
+	t.Run("outside retention period", func(t *testing.T) {
+		setupFulu(t)
+
+		// Create a data column storage with a very short retention period.
+		shortRetentionStorage, err := filesystem.NewDataColumnStorage(ctx,
+			filesystem.WithDataColumnBasePath(t.TempDir()),
+			filesystem.WithDataColumnRetentionEpochs(1), // Only 1 epoch of retention.
+		)
+		require.NoError(t, err)
+
+		// Mock the genesis time to make the current slot much later than the block slot.
+		// This simulates being outside the retention period.
+		genesisTime := time.Now().Add(-time.Duration(fuluForkSlot+1000) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: genesisTime,
+			},
+			BeaconDB:          db,
+			DataColumnStorage: shortRetentionStorage,
+		}
+
+		// Since the block is outside the retention period, this should return an empty array.
+		retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
+		require.IsNil(t, rpcErr)
+		require.Equal(t, 0, len(retrievedDataColumns))
+	})
+
+	t.Run("block not found", func(t *testing.T) {
+		setupFulu(t)
+
+		blocker := &BeaconDbBlocker{
+			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
+				Genesis: time.Now(),
+			},
+			BeaconDB:          db,
+			DataColumnStorage: dataColumnStorage,
+		}
+
+		nonExistentRoot := bytesutil.PadTo([]byte("nonexistent"), 32)
+		_, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(nonExistentRoot), nil)
+		require.NotNil(t, rpcErr)
+		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
+	})
+}
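One detail in the "outside retention period" case deserves a note: the test fakes the current slot by back-dating genesis, since the current slot is derived as (now - genesis) / SecondsPerSlot. A tiny illustrative helper capturing that trick (hypothetical, not part of the diff):

```go
package main

import (
	"fmt"
	"time"
)

// genesisForCurrentSlot returns a genesis time such that "now" corresponds to
// the given slot: genesis = now - currentSlot*secondsPerSlot. Back-dating
// genesis far enough pushes a stored block outside any retention window.
func genesisForCurrentSlot(currentSlot, secondsPerSlot uint64) time.Time {
	return time.Now().Add(-time.Duration(currentSlot*secondsPerSlot) * time.Second)
}

func main() {
	// With 12-second slots, placing "now" at slot 1000 means genesis was
	// 12000 seconds (3h20m) ago.
	g := genesisForCurrentSlot(1000, 12)
	fmt.Println(time.Since(g).Round(time.Second)) // ~3h20m0s
}
```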
beacon-chain/rpc/options/BUILD.bazel (new file, +8)
@@ -0,0 +1,8 @@
+load("@prysm//tools/go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["options.go"],
+    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options",
+    visibility = ["//visibility:public"],
+)
beacon-chain/rpc/options/options.go (new file, +24)
@@ -0,0 +1,24 @@
+package options
+
+// BlobsOption is a functional option for configuring blob retrieval.
+type BlobsOption func(*BlobsConfig)
+
+// BlobsConfig holds the configuration for blob retrieval.
+type BlobsConfig struct {
+	Indices         []int
+	VersionedHashes [][]byte
+}
+
+// WithIndices specifies the blob indices to retrieve.
+func WithIndices(indices []int) BlobsOption {
+	return func(c *BlobsConfig) {
+		c.Indices = indices
+	}
+}
+
+// WithVersionedHashes specifies the versioned hashes to retrieve blobs by.
+func WithVersionedHashes(hashes [][]byte) BlobsOption {
+	return func(c *BlobsConfig) {
+		c.VersionedHashes = hashes
+	}
+}
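The test call sites above (e.g. `blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))`) show the caller side of this functional-options pattern. A minimal sketch of the consumer side, assuming only the `options` package defined above and its BUILD import path; the `apply` helper is illustrative, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
)

// apply folds a list of functional options into a config, the usual
// consumer-side idiom for this pattern. Hypothetical helper; the real
// Blobs implementation presumably does something equivalent internally.
func apply(opts ...options.BlobsOption) *options.BlobsConfig {
	cfg := &options.BlobsConfig{} // zero value: no index or hash filter, i.e. "all blobs"
	for _, opt := range opts {
		opt(cfg)
	}
	return cfg
}

func main() {
	cfg := apply(
		options.WithIndices([]int{0, 2}),
		options.WithVersionedHashes([][]byte{make([]byte, 32)}),
	)
	fmt.Println(cfg.Indices, len(cfg.VersionedHashes)) // [0 2] 1
}
```

The variadic option list is what lets `Blobs(ctx, id)` grow new filters (indices, versioned hashes) without another signature change like the `nil`-argument removal seen throughout the tests above.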
@@ -281,7 +281,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
 func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
 	var (
 		blobSidecars       []*ethpb.BlobSidecar
-		dataColumnSidecars []*ethpb.DataColumnSidecar
+		dataColumnSidecars []blocks.RODataColumn
 	)
 
 	ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
@@ -309,10 +309,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 		return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
 	}
 
+	rob, err := blocks.NewROBlockWithRoot(block, root)
 	if block.IsBlinded() {
 		block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
 	} else if block.Version() >= version.Deneb {
-		blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
+		blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
 	}
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -323,18 +324,18 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		if err := vs.broadcastReceiveBlock(ctx, block, root); err != nil {
+		if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
 			errChan <- errors.Wrap(err, "broadcast/receive block failed")
 			return
 		}
 		errChan <- nil
 	}()
 
-	wg.Wait()
-
 	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
 	}
+	wg.Wait()
 	if err := <-errChan; err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
 	}
@@ -348,10 +349,10 @@ func (vs *Server) broadcastAndReceiveSidecars(
 	block interfaces.SignedBeaconBlock,
 	root [fieldparams.RootLength]byte,
 	blobSidecars []*ethpb.BlobSidecar,
-	dataColumnSideCars []*ethpb.DataColumnSidecar,
+	dataColumnSidecars []blocks.RODataColumn,
 ) error {
 	if block.Version() >= version.Fulu {
-		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
+		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
 			return errors.Wrap(err, "broadcast and receive data columns")
 		}
 		return nil
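The reordering above changes what the proposer waits for and when: the goroutine signals the WaitGroup as soon as the block broadcast completes (via the new `broadcastBlock`, shown in the next hunk), while the block-import outcome still travels over `errChan`, so sidecar broadcast now overlaps with block import. A minimal, self-contained sketch of that two-signal pattern, illustrative only and not the proposer code itself:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		// Phase 1: broadcast the block; signal the WaitGroup the moment it is
		// on the wire so sidecar work is not held back by block import.
		func() {
			defer wg.Done()
			fmt.Println("broadcast block")
		}()
		// Phase 2: import the block; report the outcome on errChan.
		fmt.Println("receive block")
		errChan <- nil
	}()

	// Sidecar broadcast proceeds concurrently with block import...
	fmt.Println("broadcast sidecars")
	wg.Wait()                         // ...but we don't return before the block was broadcast,
	if err := <-errChan; err != nil { // and block import must have succeeded.
		fmt.Println("block import failed:", err)
	}
}
```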
@@ -398,21 +399,28 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
 }
 
 func (vs *Server) handleUnblindedBlock(
-	block interfaces.SignedBeaconBlock,
+	block blocks.ROBlock,
 	req *ethpb.GenericSignedBeaconBlock,
-) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
+) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
 	rawBlobs, proofs, err := blobsAndProofs(req)
 	if err != nil {
 		return nil, nil, err
 	}
 
 	if block.Version() >= version.Fulu {
-		dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
+		// Compute cells and proofs from the blobs and cell proofs.
+		cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
 		if err != nil {
-			return nil, nil, errors.Wrap(err, "construct data column sidecars")
+			return nil, nil, errors.Wrap(err, "compute cells and proofs")
 		}
 
-		return nil, dataColumnSideCars, nil
+		// Construct data column sidecars from the signed block and the cells and proofs.
+		roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(block))
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "data column sidecars")
+		}
+
+		return nil, roDataColumnSidecars, nil
 	}
 
 	blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
@@ -424,7 +432,26 @@ func (vs *Server) handleUnblindedBlock(
 }
 
 // broadcastReceiveBlock broadcasts a block and handles its reception.
-func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
+func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
+	if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
+		return errors.Wrap(err, "broadcast block")
+	}
+
+	vs.BlockNotifier.BlockFeed().Send(&feed.Event{
+		Type: blockfeed.ReceivedBlock,
+		Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
+	})
+
+	if err := vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil); err != nil {
+		return errors.Wrap(err, "receive block")
+	}
+
+	return nil
+}
+
+func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
+	defer wg.Done()
+
 	protoBlock, err := block.Proto()
 	if err != nil {
 		return errors.Wrap(err, "protobuf conversion failed")
@@ -432,11 +459,13 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
 	if err := vs.P2P.Broadcast(ctx, protoBlock); err != nil {
 		return errors.Wrap(err, "broadcast failed")
 	}
-	vs.BlockNotifier.BlockFeed().Send(&feed.Event{
-		Type: blockfeed.ReceivedBlock,
-		Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
-	})
-	return vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil)
+
+	log.WithFields(logrus.Fields{
+		"slot": block.Block().Slot(),
+		"root": fmt.Sprintf("%#x", root),
+	}).Debug("Broadcasted block")
+
+	return nil
 }
 
 // broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
@@ -468,26 +497,21 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
 // broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
 func (vs *Server) broadcastAndReceiveDataColumns(
 	ctx context.Context,
-	sidecars []*ethpb.DataColumnSidecar,
+	roSidecars []blocks.RODataColumn,
 	root [fieldparams.RootLength]byte,
 ) error {
-	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
+	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
 	eg, _ := errgroup.WithContext(ctx)
-	for _, sidecar := range sidecars {
-		roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
-		if err != nil {
-			return errors.Wrap(err, "new read-only data column with root")
-		}
-
+	for _, roSidecar := range roSidecars {
 		// We build this block ourselves, so we can upgrade the read-only data column sidecar into a verified one.
-		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
+		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
 		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
 
 		eg.Go(func() error {
 			// Compute the subnet index based on the column index.
-			subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
+			subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)
 
-			if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
+			if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
 				return errors.Wrap(err, "broadcast data column")
 			}
 
@@ -495,10 +519,6 @@ func (vs *Server) broadcastAndReceiveDataColumns(
 		})
 	}
 
-	if err := eg.Wait(); err != nil {
-		return errors.Wrap(err, "wait for data columns to be broadcasted")
-	}
-
 	if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
 		return errors.Wrap(err, "receive data column")
 	}
@@ -509,6 +529,11 @@ func (vs *Server) broadcastAndReceiveDataColumns(
 			Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
 		})
 	}
 
+	if err := eg.Wait(); err != nil {
+		return errors.Wrap(err, "wait for data columns to be broadcasted")
+	}
+
 	return nil
 }
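The hunks above also move `eg.Wait()` to the very end, so column reception and event notification proceed while broadcasts are still in flight. The underlying errgroup fan-out idiom, reduced to a runnable sketch (illustrative only; the real code broadcasts one sidecar per goroutine):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, _ := errgroup.WithContext(context.Background())
	columns := []uint64{0, 1, 2, 3}
	for _, idx := range columns {
		eg.Go(func() error {
			// One broadcast per column, all in flight at once. Since Go 1.22
			// the loop variable is per-iteration, so capturing idx is safe.
			fmt.Println("broadcast column", idx)
			return nil
		})
	}
	// Other work (e.g. receiving the columns locally) can happen here;
	// Wait collects the first error, if any, from the fan-out.
	if err := eg.Wait(); err != nil {
		fmt.Println("broadcast failed:", err)
	}
}
```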
@@ -10,7 +10,7 @@ import (
 )
 
 // BuildBlobSidecars, given a block, builds the blob sidecars for the block.
-func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
+func BuildBlobSidecars(blk interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
 	if blk.Version() < version.Deneb {
 		return nil, nil // No blobs before Deneb.
 	}
@@ -16,10 +16,10 @@ import (
 	blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
 	opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -145,7 +145,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
 		log.WithError(err).Errorf("Could not listen to port in Start() %s", address)
 	}
 	s.listener = lis
-	log.WithField("address", address).Info("gRPC server listening on port")
+	log.WithField("address", address).Info("Beacon chain gRPC server listening")
 
 	opts := []grpc.ServerOption{
 		grpc.StatsHandler(otelgrpc.NewServerHandler()),
@@ -351,7 +351,7 @@ func (s *Service) Stop() error {
 	s.cancel()
 	if s.listener != nil {
 		s.grpcServer.GracefulStop()
-		log.Debug("Initiated graceful stop of gRPC server")
+		log.Debug("Completed graceful stop of beacon-chain gRPC server")
 	}
 	return nil
 }
@@ -42,8 +42,9 @@ func TestLifecycle_OK(t *testing.T) {
 
 	rpcService.Start()
 
-	require.LogsContain(t, hook, "listening on port")
+	require.LogsContain(t, hook, "Beacon chain gRPC server listening")
 	assert.NoError(t, rpcService.Stop())
+	require.LogsContain(t, hook, "Completed graceful stop of beacon-chain gRPC server")
 }
 
 func TestStatus_CredentialError(t *testing.T) {
@@ -84,7 +85,7 @@ func TestRPC_InsecureEndpoint(t *testing.T) {
 
 	rpcService.Start()
 
-	require.LogsContain(t, hook, "listening on port")
+	require.LogsContain(t, hook, "Beacon chain gRPC server listening")
 	require.LogsContain(t, hook, "You are using an insecure gRPC server")
 	assert.NoError(t, rpcService.Stop())
 }
@@ -16,6 +16,7 @@ go_library(
     deps = [
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/rpc/core:go_default_library",
+        "//beacon-chain/rpc/options:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
Some files were not shown because too many files have changed in this diff.