Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 13:28:01 -05:00

Compare commits: d929e1dcaa...actions-go (79 commits)
Commits in this comparison (SHA1; author and date columns were empty in the capture):

0246cc26c7
0ea5e2cf9d
29fe707143
d68196822b
924fe4de98
fe9dd255c7
83d75bcb78
ba5f7361ad
8fa956036a
58ce1c25f5
98532a2df3
08be6fde92
4585cdc932
aa47435c91
80eba4e6dd
606294e17f
977e923692
013b6b1d60
66ff6f70b8
5b1a9fb077
35bc9b1a0f
02dca85251
39b2163702
d67ee62efa
9f9401e615
6b89d839f6
d826a3c7fe
900f162467
5266d34a22
c941e47b7e
54991bbc52
7e32bbc199
a1be3d68cd
88af3f90a5
600169a53b
a5e4fccb47
e589588f47
41884d8d9d
db074cbf12
360e89767f
238d5c07df
2292d955a3
76bc30e8ba
a5c7c6da06
4b09dd4aa5
1dab5a9f8a
d681232fe6
1d24f89c96
967193e6a2
9e40551852
a8cab58f7e
df86f57507
9b0a3e9632
5e079aa62c
5c68ec5c39
5410232bef
3f5c4df7e0
5c348dff59
8136ff7c3a
f690af81fa
029b896c79
e1117a7de2
39b2a02f66
4e8a710b64
7191a5bcdf
d335a52c49
c7401f5e75
0057cc57b5
b1dc5e485d
f035da6fc5
854f4bc9a3
1933adedbf
278b796e43
8e52d0c3c6
d339e09509
8ec460223c
349d9d2fd0
e0aecb9c32
4a1ab70929
.github/actions/gomodtidy/Dockerfile (vendored, 2 changes)

@@ -1,4 +1,4 @@
-FROM golang:1.24-alpine
+FROM golang:1.25.1-alpine

 COPY entrypoint.sh /entrypoint.sh
.github/workflows/fuzz.yml (vendored, 4 changes)

@@ -16,7 +16,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
        with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - id: list
        uses: shogo82148/actions-go-fuzz/list@v0
        with:
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
        with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - uses: shogo82148/actions-go-fuzz/run@v0
        with:
          packages: ${{ matrix.package }}
.github/workflows/go.yml (vendored, 22 changes)

@@ -31,7 +31,7 @@ jobs:
       - name: Set up Go 1.24
        uses: actions/setup-go@v4
        with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
       - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
@@ -44,27 +44,27 @@ jobs:
     steps:
       - name: Checkout
        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0

-      - name: Set up Go 1.24
-        uses: actions/setup-go@v4
+      - name: Set up Go 1.25.1
+        uses: actions/setup-go@v5
        with:
-          go-version: '1.24.0'
-        id: go
+          go-version: '1.25.1'

       - name: Golangci-lint
-        uses: golangci/golangci-lint-action@v5
+        uses: golangci/golangci-lint-action@v8
        with:
-          version: v1.64.5
-          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
+          version: v2.4

   build:
     name: Build
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.x
+      - name: Set up Go 1.25.1
        uses: actions/setup-go@v4
        with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
        id: go

       - name: Check out code into the Go module directory
.golangci.yml (121 changes, migration to the golangci-lint v2 config format)

@@ -1,90 +1,41 @@
+version: "2"
 run:
   timeout: 10m
-  go: '1.23.5'
-
-issues:
-  exclude-files:
-    - validator/web/site_data.go
-    - .*_test.go
-  exclude-dirs:
-    - proto
-    - tools/analyzers
-
+  go: 1.23.5
 linters:
-  enable-all: true
-  disable:
-    # Deprecated linters:
-    # Disabled for now:
-    - asasalint
-    - bodyclose
-    - containedctx
-    - contextcheck
-    - cyclop
-    - depguard
-    - dogsled
-    - dupl
-    - durationcheck
-    - errname
-    - err113
-    - exhaustive
-    - exhaustruct
-    - forbidigo
-    - forcetypeassert
-    - funlen
-    - gci
-    - gochecknoglobals
-    - gochecknoinits
-    - goconst
-    - gocritic
-    - gocyclo
-    - godot
-    - godox
-    - gofumpt
-    - gomoddirectives
-    - gosec
-    - inamedparam
-    - interfacebloat
-    - intrange
-    - ireturn
-    - lll
-    - maintidx
-    - makezero
-    - mnd
-    - musttag
-    - nakedret
-    - nestif
-    - nilnil
-    - nlreturn
-    - noctx
-    - nolintlint
-    - nonamedreturns
-    - nosprintfhostport
-    - perfsprint
-    - prealloc
-    - predeclared
-    - promlinter
-    - protogetter
-    - recvcheck
-    - revive
-    - spancheck
-    - staticcheck
-    - stylecheck
-    - tagalign
-    - tagliatelle
-    - thelper
-    - unparam
-    - usetesting
-    - varnamelen
-    - wrapcheck
-    - wsl
+  enable:
+    - errcheck
+    - ineffassign
+    - govet
+  disable:
+    - unused
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
-
-linters-settings:
-  gocognit:
-    # TODO: We should target for < 50
-    min-complexity: 65
-
-output:
-  print-issued-lines: true
-  sort-results: true
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
@@ -2993,7 +2993,7 @@ There are two known issues with this release:

 ### Added

-- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
+- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
   details.
 - Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
 - Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
@@ -2,7 +2,7 @@

 Prysm is a Go project with many complicated dependencies, including some C++ based libraries. There
 are two parts to Prysm's dependency management: Go modules and Bazel-managed dependencies. Be sure
-to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
+to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
 understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
 "go build" project.
WORKSPACE (58 changes)

@@ -158,15 +158,15 @@ oci_register_toolchains(

 http_archive(
     name = "io_bazel_rules_go",
+    integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
     patch_args = ["-p1"],
     patches = [
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
-    sha256 = "a729c8ed2447c90fe140077689079ca0acfb7580ec41637f312d650ce9d93d96",
     urls = [
-        "https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
+        "https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
+        "https://github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
     ],
 )

@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe

 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.24.6",
+    go_version = "1.25.1",
     nogo = "@//:nogo",
 )

@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.6.0-alpha.5"
+consensus_spec_version = "v1.6.0-beta.0"

 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-BXuEb1XbeSft0qzVFnoB8KC0YR1qM3ybT5lKUDbUWn8=",
-        "minimal": "sha256-EjwSHgBbWSoy5hm9V+A/bVMabyojaKsBNPrRtuPVq4k=",
-        "mainnet": "sha256-OGWMzarzaV1B9mVpy48/DCUbhjfX+b64pAxWwPLWhAs=",
+        "general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
+        "minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
+        "mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
     },
     version = consensus_spec_version,
 )

@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-FQWR5EZuVcQGR0ol9vpd7eunnfGexJ/7J3xycrFEJbU=",
+    integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )

@@ -300,22 +300,6 @@ filegroup(
     url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
 )

-http_archive(
-    name = "eth2_networks",
-    build_file_content = """
-filegroup(
-    name = "configs",
-    srcs = glob([
-        "shared/**/config.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
-    strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
-    url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
-)
-
 http_archive(
     name = "holesky_testnet",
     build_file_content = """
@@ -327,9 +311,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
-    strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
-    url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz",  # 2025-03-17
+    integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
+    strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
+    url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
 )

 http_archive(
@@ -359,9 +343,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
-    strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
-    url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz",  # 2025-03-17
+    integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
+    strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
+    url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
 )

 http_archive(
@@ -375,17 +359,17 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
-    strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
-    url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
+    integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
+    strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
+    url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
 )

 http_archive(
     name = "com_google_protobuf",
-    sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
-    strip_prefix = "protobuf-25.1",
+    sha256 = "7c3ebd7aaedd86fa5dc479a0fda803f602caaf78d8aff7ce83b89e1b8ae7442a",
+    strip_prefix = "protobuf-28.3",
     urls = [
-        "https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz",
+        "https://github.com/protocolbuffers/protobuf/archive/v28.3.tar.gz",
     ],
 )
@@ -30,10 +30,11 @@ import (
 )

 const (
-    getExecHeaderPath          = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
-    getStatus                  = "/eth/v1/builder/status"
-    postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
-    postRegisterValidatorPath  = "/eth/v1/builder/validators"
+    getExecHeaderPath            = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
+    getStatus                    = "/eth/v1/builder/status"
+    postBlindedBeaconBlockPath   = "/eth/v1/builder/blinded_blocks"
+    postBlindedBeaconBlockV2Path = "/eth/v2/builder/blinded_blocks"
+    postRegisterValidatorPath    = "/eth/v1/builder/validators"
 )

 var (
@@ -512,7 +513,7 @@ func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.R
     }

     // Post the blinded block - the response should only contain a status code (no payload)
-    _, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
+    _, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockV2Path, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
     if err != nil {
         return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
     }
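The `{{.Slot}}`, `{{.ParentHash}}`, and `{{.Pubkey}}` placeholders in getExecHeaderPath are Go text/template fields. As a hypothetical sketch only (this diff does not show the client's actual request-building helper), such a path can be expanded like this:

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

func main() {
    // Same placeholder syntax as getExecHeaderPath above.
    const headerPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
    tmpl := template.Must(template.New("execHeader").Parse(headerPath))

    // Field values here are invented for illustration.
    var path bytes.Buffer
    err := tmpl.Execute(&path, struct{ Slot, ParentHash, Pubkey string }{
        Slot:       "123",
        ParentHash: "0xparent",
        Pubkey:     "0xpubkey",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(path.String()) // /eth/v1/builder/header/123/0xparent/0xpubkey
}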
@@ -1561,7 +1561,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
     t.Run("success", func(t *testing.T) {
         hc := &http.Client{
             Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
                 require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                 require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
                 require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
@@ -1586,7 +1586,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
     t.Run("success_ssz", func(t *testing.T) {
         hc := &http.Client{
             Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
                 require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
                 require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
                 require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
@@ -1612,7 +1612,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
     t.Run("error_response", func(t *testing.T) {
         hc := &http.Client{
             Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
-                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
                 require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                 message := ErrorMessage{
                     Code: 400,
@@ -1,9 +1,8 @@
 package httprest

 import (
-    "time"
-
     "net/http"
+    "time"

     "github.com/OffchainLabs/prysm/v6/api/server/middleware"
 )
@@ -290,3 +290,9 @@ type GetProposerLookaheadResponse struct {
     Finalized bool     `json:"finalized"`
     Data      []string `json:"data"` // validator indexes
 }
+
+type GetBlobsResponse struct {
+    ExecutionOptimistic bool     `json:"execution_optimistic"`
+    Finalized           bool     `json:"finalized"`
+    Data                []string `json:"data"` // blobs
+}
@@ -56,3 +56,19 @@ type ForkChoiceNodeExtraData struct {
     TimeStamp string `json:"timestamp"`
     Target    string `json:"target"`
 }
+
+type GetDebugDataColumnSidecarsResponse struct {
+    Version             string               `json:"version"`
+    ExecutionOptimistic bool                 `json:"execution_optimistic"`
+    Finalized           bool                 `json:"finalized"`
+    Data                []*DataColumnSidecar `json:"data"`
+}
+
+type DataColumnSidecar struct {
+    Index                        string                   `json:"index"`
+    Column                       []string                 `json:"column"`
+    KzgCommitments               []string                 `json:"kzg_commitments"`
+    KzgProofs                    []string                 `json:"kzg_proofs"`
+    SignedBeaconBlockHeader      *SignedBeaconBlockHeader `json:"signed_block_header"`
+    KzgCommitmentsInclusionProof []string                 `json:"kzg_commitments_inclusion_proof"`
+}
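To show how the new debug types are meant to be consumed, here is a self-contained sketch that decodes a hand-written JSON payload into trimmed-down copies of the structs above; the real definitions live in Prysm and include the remaining fields, and the sample payload is invented for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copies of the structs added above, just enough to decode the sample.
type DataColumnSidecar struct {
    Index          string   `json:"index"`
    KzgCommitments []string `json:"kzg_commitments"`
}

type GetDebugDataColumnSidecarsResponse struct {
    Version   string               `json:"version"`
    Finalized bool                 `json:"finalized"`
    Data      []*DataColumnSidecar `json:"data"`
}

func main() {
    raw := []byte(`{"version":"fulu","finalized":true,"data":[{"index":"0","kzg_commitments":["0xc0"]}]}`)
    var resp GetDebugDataColumnSidecarsResponse
    if err := json.Unmarshal(raw, &resp); err != nil {
        panic(err)
    }
    fmt.Println(resp.Version, len(resp.Data)) // fulu 1
}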
@@ -27,7 +27,7 @@ go_library(
         "receive_block.go",
         "receive_data_column.go",
         "service.go",
-        "setup_forchoice.go",
+        "setup_forkchoice.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
     ],
@@ -50,7 +50,6 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
@@ -63,6 +62,7 @@ go_library(
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
         "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
         "//beacon-chain/operations/slashings:go_default_library",
@@ -148,7 +148,6 @@ go_test(
         "//beacon-chain/core/blocks:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
@@ -161,6 +160,7 @@ go_test(
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/attestations/kv:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
@@ -16,7 +16,7 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
     "github.com/OffchainLabs/prysm/v6/config/features"
     "github.com/OffchainLabs/prysm/v6/config/params"
-    consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
+    blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
     payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
     "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -218,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er

 // notifyNewPayload signals execution engine on a new payload.
 // It returns true if the EL has returned VALID for the block
-func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
-    preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
+// stVersion should represent the version of the pre-state; header should also be from the pre-state.
+func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
     ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
     defer span.End()

     // Execution payload is only supported in Bellatrix and beyond. Pre
     // merge blocks are never optimistic
-    if blk == nil {
-        return false, errors.New("signed beacon block can't be nil")
-    }
-    if preStateVersion < version.Bellatrix {
+    if stVersion < version.Bellatrix {
         return true, nil
     }
-    if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
-        return false, err
-    }
     body := blk.Block().Body()
-    enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
+    enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
     if err != nil {
         return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
     }
@@ -268,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
             return false, errors.New("nil execution requests")
         }
     }
     lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
-
-    switch {
-    case err == nil:
+    if err == nil {
         newPayloadValidNodeCount.Inc()
         return true, nil
-    case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
+    }
+    logFields := logrus.Fields{
+        "slot":             blk.Block().Slot(),
+        "parentRoot":       fmt.Sprintf("%#x", parentRoot),
+        "root":             fmt.Sprintf("%#x", blk.Root()),
+        "payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
+    }
+    if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
         newPayloadOptimisticNodeCount.Inc()
-        log.WithFields(logrus.Fields{
-            "slot":             blk.Block().Slot(),
-            "payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
-        }).Info("Called new payload with optimistic block")
+        log.WithFields(logFields).Info("Called new payload with optimistic block")
         return false, nil
-    case errors.Is(err, execution.ErrInvalidPayloadStatus):
-        lvh := bytesutil.ToBytes32(lastValidHash)
+    }
+    if errors.Is(err, execution.ErrInvalidPayloadStatus) {
+        log.WithFields(logFields).WithError(err).Error("Invalid payload status")
         return false, invalidBlock{
             error:         ErrInvalidPayload,
-            lastValidHash: lvh,
+            lastValidHash: bytesutil.ToBytes32(lastValidHash),
         }
-    default:
-        return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
     }
+    log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
+    return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
 }

 // reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
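The refactor above replaces the switch with early-return branches that share a single logFields map. A generic, self-contained sketch of the same sentinel-error dispatch pattern follows; the error variables here are illustrative stand-ins, not Prysm's actual ones:

package main

import (
    "errors"
    "fmt"
)

var (
    errSyncing = errors.New("payload accepted, EL syncing")
    errInvalid = errors.New("invalid payload status")
)

// classify maps an engine-call error onto (valid, fatal) the same way the
// refactored notifyNewPayload does: nil means VALID, a syncing sentinel means
// optimistic (not valid, not fatal), and anything else is an error.
func classify(err error) (valid bool, fatal error) {
    if err == nil {
        return true, nil
    }
    if errors.Is(err, errSyncing) {
        return false, nil // optimistic: not valid yet, but not an error
    }
    if errors.Is(err, errInvalid) {
        return false, fmt.Errorf("block rejected: %w", err)
    }
    return false, fmt.Errorf("undefined engine error: %w", err)
}

func main() {
    fmt.Println(classify(nil))
    fmt.Println(classify(errSyncing))
    fmt.Println(classify(fmt.Errorf("wrapped: %w", errInvalid)))
}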
@@ -481,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
     phase0State, _ := util.DeterministicGenesisState(t, 1)
     altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
     bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
-    a := &ethpb.SignedBeaconBlockAltair{
-        Block: &ethpb.BeaconBlockAltair{
-            Body: &ethpb.BeaconBlockBodyAltair{},
-        },
-    }
+    a := util.NewBeaconBlockAltair()
     altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
     require.NoError(t, err)
-    blk := &ethpb.SignedBeaconBlockBellatrix{
-        Block: &ethpb.BeaconBlockBellatrix{
-            Slot: 1,
-            Body: &ethpb.BeaconBlockBodyBellatrix{
-                ExecutionPayload: &v1.ExecutionPayload{
-                    BlockNumber:   1,
-                    ParentHash:    make([]byte, fieldparams.RootLength),
-                    FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
-                    StateRoot:     make([]byte, fieldparams.RootLength),
-                    ReceiptsRoot:  make([]byte, fieldparams.RootLength),
-                    LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
-                    PrevRandao:    make([]byte, fieldparams.RootLength),
-                    ExtraData:     make([]byte, 0),
-                    BaseFeePerGas: make([]byte, fieldparams.RootLength),
-                    BlockHash:     make([]byte, fieldparams.RootLength),
-                    Transactions:  make([][]byte, 0),
-                },
-            },
-        },
-    }
+    blk := util.NewBeaconBlockBellatrix()
+    blk.Block.Slot = 1
+    blk.Block.Body.ExecutionPayload.BlockNumber = 1
     bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
     require.NoError(t, err)
     st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
@@ -544,12 +523,6 @@
         blk:            altairBlk,
         isValidPayload: true,
     },
-    {
-        name:           "nil beacon block",
-        postState:      bellatrixState,
-        errString:      "signed beacon block can't be nil",
-        isValidPayload: false,
-    },
     {
         name:      "new payload with optimistic block",
         postState: bellatrixState,
@@ -576,15 +549,8 @@
         name:      "altair pre state, happy case",
         postState: bellatrixState,
         blk: func() interfaces.ReadOnlySignedBeaconBlock {
-            blk := &ethpb.SignedBeaconBlockBellatrix{
-                Block: &ethpb.BeaconBlockBellatrix{
-                    Body: &ethpb.BeaconBlockBodyBellatrix{
-                        ExecutionPayload: &v1.ExecutionPayload{
-                            ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-                        },
-                    },
-                },
-            }
+            blk := util.NewBeaconBlockBellatrix()
+            blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
             b, err := consensusblocks.NewSignedBeaconBlock(blk)
             require.NoError(t, err)
             return b
@@ -595,24 +561,7 @@
         name:      "not at merge transition",
         postState: bellatrixState,
         blk: func() interfaces.ReadOnlySignedBeaconBlock {
-            blk := &ethpb.SignedBeaconBlockBellatrix{
-                Block: &ethpb.BeaconBlockBellatrix{
-                    Body: &ethpb.BeaconBlockBodyBellatrix{
-                        ExecutionPayload: &v1.ExecutionPayload{
-                            ParentHash:    make([]byte, fieldparams.RootLength),
-                            FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
-                            StateRoot:     make([]byte, fieldparams.RootLength),
-                            ReceiptsRoot:  make([]byte, fieldparams.RootLength),
-                            LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
-                            PrevRandao:    make([]byte, fieldparams.RootLength),
-                            ExtraData:     make([]byte, 0),
-                            BaseFeePerGas: make([]byte, fieldparams.RootLength),
-                            BlockHash:     make([]byte, fieldparams.RootLength),
-                            Transactions:  make([][]byte, 0),
-                        },
-                    },
-                },
-            }
+            blk := util.NewBeaconBlockBellatrix()
             b, err := consensusblocks.NewSignedBeaconBlock(blk)
             require.NoError(t, err)
             return b
@@ -623,15 +572,8 @@
         name:      "happy case",
         postState: bellatrixState,
         blk: func() interfaces.ReadOnlySignedBeaconBlock {
-            blk := &ethpb.SignedBeaconBlockBellatrix{
-                Block: &ethpb.BeaconBlockBellatrix{
-                    Body: &ethpb.BeaconBlockBodyBellatrix{
-                        ExecutionPayload: &v1.ExecutionPayload{
-                            ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-                        },
-                    },
-                },
-            }
+            blk := util.NewBeaconBlockBellatrix()
+            blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
             b, err := consensusblocks.NewSignedBeaconBlock(blk)
             require.NoError(t, err)
             return b
@@ -642,15 +584,8 @@
         name:      "undefined error from ee",
         postState: bellatrixState,
         blk: func() interfaces.ReadOnlySignedBeaconBlock {
-            blk := &ethpb.SignedBeaconBlockBellatrix{
-                Block: &ethpb.BeaconBlockBellatrix{
-                    Body: &ethpb.BeaconBlockBodyBellatrix{
-                        ExecutionPayload: &v1.ExecutionPayload{
-                            ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-                        },
-                    },
-                },
-            }
+            blk := util.NewBeaconBlockBellatrix()
+            blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
             b, err := consensusblocks.NewSignedBeaconBlock(blk)
             require.NoError(t, err)
             return b
@@ -662,15 +597,8 @@
         name:      "invalid block hash error from ee",
         postState: bellatrixState,
         blk: func() interfaces.ReadOnlySignedBeaconBlock {
-            blk := &ethpb.SignedBeaconBlockBellatrix{
-                Block: &ethpb.BeaconBlockBellatrix{
-                    Body: &ethpb.BeaconBlockBodyBellatrix{
-                        ExecutionPayload: &v1.ExecutionPayload{
-                            ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-                        },
-                    },
-                },
-            }
+            blk := util.NewBeaconBlockBellatrix()
+            blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
             b, err := consensusblocks.NewSignedBeaconBlock(blk)
             require.NoError(t, err)
             return b
@@ -701,7 +629,9 @@
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
     postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
     require.NoError(t, err)
-    isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
+    rob, err := consensusblocks.NewROBlock(tt.blk)
+    require.NoError(t, err)
+    isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
     if tt.errString != "" {
         require.ErrorContains(t, tt.errString, err)
         if tt.invalidBlock {
@@ -725,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
     ctx := tr.ctx

     bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
-    blk := &ethpb.SignedBeaconBlockBellatrix{
-        Block: &ethpb.BeaconBlockBellatrix{
-            Body: &ethpb.BeaconBlockBodyBellatrix{
-                ExecutionPayload: &v1.ExecutionPayload{
-                    ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
-                },
-            },
-        },
-    }
+    blk := util.NewBeaconBlockBellatrix()
+    blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
     bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
     require.NoError(t, err)
+    rob, err := consensusblocks.NewROBlock(bellatrixBlk)
+    require.NoError(t, err)
     e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
     e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
         Header: gethtypes.Header{
@@ -752,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
     service.cfg.ExecutionEngineCaller = e
     postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
     require.NoError(t, err)
-    validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
+    validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
     require.NoError(t, err)
     require.Equal(t, true, validated)
 }
@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
 type Blob [BytesPerBlob]byte

 // BytesPerCell is the number of bytes in a single cell.
-const BytesPerCell = ckzg4844.BytesPerCell
+const (
+    BytesPerCell  = ckzg4844.BytesPerCell
+    BytesPerProof = ckzg4844.BytesPerProof
+)

 // Cell represents a chunk of an encoded Blob.
 type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
 type Commitment [48]byte

 // Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
-type Proof [48]byte
+type Proof [BytesPerProof]byte

 // Bytes48 is a 48-byte array.
 type Bytes48 = ckzg4844.Bytes48
@@ -102,7 +105,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
     for i := range cells {
         ckzgCells[i] = ckzg4844.Cell(cells[i])
     }
-
     return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
@@ -1,10 +1,30 @@
 package kzg

 import (
+    "fmt"
+
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     GoKZG "github.com/crate-crypto/go-kzg-4844"
+    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
     "github.com/pkg/errors"
 )

+func bytesToBlob(blob []byte) *GoKZG.Blob {
+    var ret GoKZG.Blob
+    copy(ret[:], blob)
+    return &ret
+}
+
+func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
+    copy(ret[:], commitment)
+    return
+}
+
+func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
+    copy(ret[:], proof)
+    return
+}
+
 // Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
 func Verify(blobSidecars ...blocks.ROBlob) error {
     if len(blobSidecars) == 0 {
@@ -27,18 +47,121 @@
     return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
 }

-func bytesToBlob(blob []byte) *GoKZG.Blob {
-    var ret GoKZG.Blob
-    copy(ret[:], blob)
-    return &ret
-}
-
-func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
-    copy(ret[:], commitment)
-    return
-}
-
-func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
-    copy(ret[:], proof)
-    return
-}
+// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
+// This is more efficient than verifying each blob individually when len(blobs) > 1.
+// For single blob verification, it uses the optimized single verification path.
+func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
+    if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
+        return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
+    }
+
+    if len(blobs) == 0 {
+        return nil
+    }
+
+    // Optimize for single blob case - use single verification to avoid batch overhead
+    if len(blobs) == 1 {
+        return kzgContext.VerifyBlobKZGProof(
+            bytesToBlob(blobs[0]),
+            bytesToCommitment(commitments[0]),
+            bytesToKZGProof(proofs[0]))
+    }
+
+    // Use batch verification for multiple blobs
+    ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
+    ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
+    ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
+
+    for i := range blobs {
+        if len(blobs[i]) != len(ckzg4844.Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
+        }
+        if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
+        }
+        if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
+        }
+        ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
+        ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
+        ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
+    }
+
+    valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
+    if err != nil {
+        return errors.Wrap(err, "batch verification")
+    }
+    if !valid {
+        return errors.New("batch KZG proof verification failed")
+    }
+
+    return nil
+}
+
+// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
+// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
+// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
+// For single blob verification, it optimizes by computing cells once and verifying efficiently.
+func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
+    blobCount := uint64(len(blobs))
+    expectedCellProofs := blobCount * numberOfColumns
+
+    if uint64(len(cellProofs)) != expectedCellProofs {
+        return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
+    }
+
+    if len(commitments) != len(blobs) {
+        return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
+    }
+
+    if blobCount == 0 {
+        return nil
+    }
+
+    // Handle multiple blobs - compute cells for all blobs
+    allCells := make([]Cell, 0, expectedCellProofs)
+    allCommitments := make([]Bytes48, 0, expectedCellProofs)
+    allIndices := make([]uint64, 0, expectedCellProofs)
+    allProofs := make([]Bytes48, 0, expectedCellProofs)
+
+    for blobIndex := range blobs {
+        if len(blobs[blobIndex]) != len(Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
+        }
+        // Convert blob to kzg.Blob type
+        blob := Blob(blobs[blobIndex])
+
+        // Compute cells for this blob
+        cells, err := ComputeCells(&blob)
+        if err != nil {
+            return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
+        }
+
+        // Add cells and corresponding data for each column
+        for columnIndex := range numberOfColumns {
+            cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
+            if len(commitments[blobIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
+            }
+            if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
+            }
+            allCells = append(allCells, cells[columnIndex])
+            allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
+            allIndices = append(allIndices, columnIndex)
+            allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
+        }
+    }
+
+    // Batch verify all cells
+    valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
+    if err != nil {
+        return errors.Wrap(err, "cell batch verification")
+    }
+    if !valid {
+        return errors.New("cell KZG proof batch verification failed")
+    }
+
+    return nil
+}
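As a hedged usage sketch, assuming the same kzg package scope, that Start() has already loaded the trusted setup, and a 128-column layout as in the tests below, a caller might combine the two new helpers like this:

// verifyBundle is a hypothetical caller, not part of this diff.
func verifyBundle(blobs, commitments, proofs, cellProofs [][]byte) error {
    // Blob proofs: one 48-byte proof per blob, batch-verified in one call.
    if err := VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
        return errors.Wrap(err, "blob batch verification")
    }
    // Cell proofs: numberOfColumns proofs per blob, flattened blob-major,
    // matching the BlobsBundleV2 layout described above.
    const numberOfColumns = 128
    return VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
}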
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
 }

 func TestGenerateCommitmentAndProof(t *testing.T) {
+    require.NoError(t, Start())
     blob := random.GetRandBlob(123)
     commitment, proof, err := GenerateCommitmentAndProof(blob)
     require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
     require.Equal(t, expectedCommitment, commitment)
     require.Equal(t, expectedProof, proof)
 }
+
+func TestVerifyBlobKZGProofBatch(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob batch", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob batch", func(t *testing.T) {
+        blobCount := 3
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        proofs := make([][]byte, blobCount)
+
+        for i := 0; i < blobCount; i++ {
+            blob := random.GetRandBlob(int64(i))
+            commitment, proof, err := GenerateCommitmentAndProof(blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+            proofs[i] = proof[:]
+        }
+
+        err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched input lengths", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Test different mismatch scenarios
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:]},
+            [][]byte{},
+            [][]byte{proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
+
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:], blob[:]},
+            [][]byte{commitment[:]},
+            [][]byte{proof[:], proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
+    })
+
+    t.Run("invalid commitment should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        _, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use a different blob's commitment (mismatch)
+        differentBlob := random.GetRandBlob(456)
+        wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        // Single blob optimization uses different error message
+        require.ErrorContains(t, "can't verify opening proof", err)
+    })
+
+    t.Run("invalid proof should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, _, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use wrong proof
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "short buffer", err)
+    })
+
+    t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
+        // First blob - valid
+        blob1 := random.GetRandBlob(123)
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Second blob - invalid proof
+        blob2 := random.GetRandBlob(456)
+        commitment2, _, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment2[:]}
+        proofs := [][]byte{proof1[:], invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch verification", err)
+    })
+
+    t.Run("batch KZG proof verification failed", func(t *testing.T) {
+        // Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
+        blob1 := random.GetRandBlob(123)
+        blob2 := random.GetRandBlob(456)
+
+        // Generate valid proof for blob1
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Generate valid proof for blob2 but use wrong commitment (from blob1)
+        _, proof2, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+
+        // Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
+        proofs := [][]byte{proof1[:], proof2[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch KZG proof verification failed", err)
+    })
+}
+
+func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and commitment
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Compute cells and proofs
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Create flattened cell proofs (like execution client format)
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := range blobCount {
+            // Generate blob and commitment
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            // Compute cells and proofs
+            cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs for this blob
+            for j := range numberOfColumns {
+                allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched blob and commitment count", func(t *testing.T) {
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        err := VerifyCellKZGProofBatchFromBlobData(
+            [][]byte{blob[:]},
+            [][]byte{}, // Empty commitments
+            [][]byte{},
+            128,
+        )
+        require.ErrorContains(t, "expected 128 cell proofs", err)
+    })
+
+    t.Run("wrong cell proof count", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
+        wrongCellProofs := make([][]byte, 10)
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
+        require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
+    })
+
+    t.Run("invalid cell proofs should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Create invalid cell proofs (all zeros)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            invalidCellProofs[i] = make([]byte, 48) // All zeros
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell batch verification", err)
+    })
+
+    t.Run("mismatched commitment should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and correct cell proofs
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Generate wrong commitment from different blob
+        randBlob2 := random.GetRandBlob(456)
+        var differentBlob Blob
+        copy(differentBlob[:], randBlob2[:])
+        wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
+        require.NoError(t, err)
+
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell KZG proof batch verification failed", err)
+    })
+
+    t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Create invalid blob (not properly formatted)
+        invalidBlobData := make([]byte, 10) // Too short
+        commitment := make([]byte, 48)      // Dummy commitment
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{invalidBlobData}
+        commitments := [][]byte{commitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NotNil(t, err)
+        require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
+    })
+
+    t.Run("invalid commitment size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        // Create invalid commitment (wrong size)
+        invalidCommitment := make([]byte, 32) // Should be 48 bytes
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{invalidCommitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("invalid cell proof size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Create invalid cell proofs (wrong size)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            if i == 0 {
+                invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
+            } else {
+                invalidCellProofs[i] = make([]byte, 48)
+            }
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        // First blob - valid
+        randBlob1 := random.GetRandBlob(123)
+        var blob1 Blob
+        copy(blob1[:], randBlob1[:])
+        commitment1, err := BlobToKZGCommitment(&blob1)
+        require.NoError(t, err)
+        blobs[0] = blob1[:]
+        commitments[0] = commitment1[:]
+
+        // Second blob - use invalid commitment size
+        randBlob2 := random.GetRandBlob(456)
+        var blob2 Blob
+        copy(blob2[:], randBlob2[:])
+        blobs[1] = blob2[:]
+        commitments[1] = make([]byte, 32) // Wrong size
+
+        // Add cell proofs for both blobs
+        for i := 0; i < blobCount; i++ {
+            for j := uint64(0); j < numberOfColumns; j++ {
+                allCellProofs = append(allCellProofs, make([]byte, 48))
+            }
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := 0; i < blobCount; i++ {
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs - make some invalid in the second blob
+            for j := uint64(0); j < numberOfColumns; j++ {
+                if i == 1 && j == 64 {
+                    // Invalid proof size in middle of second blob's proofs
+                    allCellProofs = append(allCellProofs, make([]byte, 20))
+                } else {
+                    allCellProofs = append(allCellProofs, make([]byte, 48))
+                }
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
+    })
+}
@@ -6,11 +6,11 @@ import (
 	"github.com/OffchainLabs/prysm/v6/async/event"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -35,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
 // WithLCStore for light client store access.
 func WithLCStore() Option {
 	return func(s *Service) error {
-		s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+		s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
 		return nil
 	}
 }

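WithLCStore follows the functional-options pattern this package already uses (see WithMaxGoroutines in the hunk header): an Option is a func(*Service) error applied during construction. A minimal sketch of how such options are typically consumed, assuming the Service and Option shapes shown above:

	type Option func(s *Service) error

	func NewService(opts ...Option) (*Service, error) {
		s := &Service{}
		for _, opt := range opts {
			if err := opt(s); err != nil {
				return nil, err
			}
		}
		return s, nil
	}
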
@@ -3,7 +3,6 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"slices"
 	"time"

 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -159,7 +158,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	}

 	// Fill in missing blocks
-	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
+	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
 		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
 	}

@@ -247,7 +246,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 		args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
 			JustifiedCheckpoint: jCheckpoints[i],
 			FinalizedCheckpoint: fCheckpoints[i]}
-		pendingNodes[len(blks)-i-1] = args
+		pendingNodes[i] = args
 		if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
 			tracing.AnnotateError(span, err)
 			return err
@@ -284,14 +283,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
 		return err
 	}
-	// Insert all nodes but the last one to forkchoice
+	// Insert all nodes to forkchoice
 	if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
 		return errors.Wrap(err, "could not insert batch to forkchoice")
 	}
-	// Insert the last block to forkchoice
-	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
-		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
-	}
 	// Set their optimistic status
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -664,14 +659,14 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(
 	ctx context.Context,
-	root [fieldparams.RootLength]byte,
-	signedBlock interfaces.ReadOnlySignedBeaconBlock,
+	roBlock consensusblocks.ROBlock,
 ) error {
-	block := signedBlock.Block()
+	block := roBlock.Block()
 	if block == nil {
 		return errors.New("invalid nil beacon block")
 	}

+	root := roBlock.Root()
 	blockVersion := block.Version()
 	if blockVersion >= version.Fulu {
 		return s.areDataColumnsAvailable(ctx, root, block)
@@ -691,8 +686,6 @@ func (s *Service) areDataColumnsAvailable(
 	root [fieldparams.RootLength]byte,
 	block interfaces.ReadOnlyBeaconBlock,
 ) error {
-	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-
 	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
 	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
 	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
@@ -726,6 +719,7 @@ func (s *Service) areDataColumnsAvailable(

 	// Compute the sampling size.
 	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
+	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
 	samplingSize := max(samplesPerSlot, custodyGroupCount)

 	// Get the peer info for the node.
@@ -751,14 +745,14 @@ func (s *Service) areDataColumnsAvailable(
 	}

 	// Get a map of data column indices that are not currently available.
-	missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
+	missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
 	if err != nil {
 		return errors.Wrap(err, "missing data columns")
 	}

 	// If there are no missing indices, all data column sidecars are available.
 	// This is the happy path.
-	if len(missingMap) == 0 {
+	if len(missing) == 0 {
 		return nil
 	}

@@ -775,33 +769,17 @@ func (s *Service) areDataColumnsAvailable(
 	// Avoid logging if DA check is called after next slot start.
 	if nextSlot.After(time.Now()) {
 		timer := time.AfterFunc(time.Until(nextSlot), func() {
-			missingMapCount := uint64(len(missingMap))
+			missingCount := uint64(len(missing))

-			if missingMapCount == 0 {
+			if missingCount == 0 {
 				return
 			}

-			var (
-				expected interface{} = "all"
-				missing  interface{} = "all"
-			)
-
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			colMapCount := uint64(len(peerInfo.CustodyColumns))
-
-			if colMapCount < numberOfColumns {
-				expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
-			}
-
-			if missingMapCount < numberOfColumns {
-				missing = uint64MapToSortedSlice(missingMap)
-			}
-
 			log.WithFields(logrus.Fields{
 				"slot":            block.Slot(),
 				"root":            fmt.Sprintf("%#x", root),
-				"columnsExpected": expected,
-				"columnsWaiting":  missing,
+				"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
+				"columnsWaiting":  helpers.SortedPrettySliceFromMap(missing),
 			}).Warning("Data columns still missing at slot end")
 		})
 		defer timer.Stop()
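helpers.SortedPrettySliceFromMap replaces the local uint64MapToSortedSlice helper (removed further down); its implementation is not part of this diff. A plausible generic sketch of what such a helper does — an assumption, not the actual code — for Go 1.21+ with "cmp" and "slices" imported:

	// Sketch: return a map's keys as a sorted slice for readable logging.
	func SortedPrettySliceFromMap[K cmp.Ordered, V any](m map[K]V) []K {
		keys := make([]K, 0, len(m))
		for k := range m {
			keys = append(keys, k)
		}
		slices.Sort(keys)
		return keys
	}
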
@@ -817,7 +795,7 @@ func (s *Service) areDataColumnsAvailable(

 			for _, index := range idents.Indices {
 				// This is a data column we are expecting.
-				if _, ok := missingMap[index]; ok {
+				if _, ok := missing[index]; ok {
 					storedDataColumnsCount++
 				}

@@ -828,10 +806,10 @@ func (s *Service) areDataColumnsAvailable(
 				}

 				// Remove the index from the missing map.
-				delete(missingMap, index)
+				delete(missing, index)

 				// Return if there is no more missing data columns.
-				if len(missingMap) == 0 {
+				if len(missing) == 0 {
 					return nil
 				}
 			}
@@ -839,10 +817,10 @@ func (s *Service) areDataColumnsAvailable(
 		case <-ctx.Done():
 			var missingIndices interface{} = "all"
 			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			missingIndicesCount := uint64(len(missingMap))
+			missingIndicesCount := uint64(len(missing))

 			if missingIndicesCount < numberOfColumns {
-				missingIndices = uint64MapToSortedSlice(missingMap)
+				missingIndices = helpers.SortedPrettySliceFromMap(missing)
 			}

 			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
@@ -926,16 +904,6 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
 	}
 }

-// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
-func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
-	output := make([]uint64, 0, len(input))
-	for idx := range input {
-		output = append(output, idx)
-	}
-	slices.Sort[[]uint64](output)
-	return output
-}
-
 // lateBlockTasks is called 4 seconds into the slot and performs tasks
 // related to late blocks. It emits a MissedSlot state feed event.
 // It calls FCU and sets the right attributes if we are proposing next slot

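For context on the timing in the comment above: with mainnet's 12-second slots, 4 seconds is SECONDS_PER_SLOT/3, the attestation deadline. A small hedged sketch of computing how far into the current slot the node is, under those mainnet assumptions (not the actual scheduler):

	// Sketch: offset into the current slot, assuming 12s slots.
	func intoSlot(genesis time.Time) time.Duration {
		return time.Since(genesis) % (12 * time.Second)
	}
	// These late-block tasks would run once intoSlot(genesis) >= 4*time.Second.
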
@@ -3,14 +3,12 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"strings"
+	"slices"
 	"time"

-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
-
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -30,6 +28,10 @@ import (
 	"github.com/sirupsen/logrus"
 )

+// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
+// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
+var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
+
 // CurrentSlot returns the current slot based on time.
 func (s *Service) CurrentSlot() primitives.Slot {
 	return slots.CurrentSlot(s.genesisTime)
@@ -129,35 +131,26 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 	})
 }

 // processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
 func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
-	if err := s.processLightClientUpdate(cfg); err != nil {
-		log.WithError(err).Error("Failed to process light client update")
-	}
-	if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-		log.WithError(err).Error("Failed to process light client optimistic update")
-	}
-	if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-		log.WithError(err).Error("Failed to process light client finality update")
-	}
-}
-
-// processLightClientUpdate saves the light client update for this block
-// if it's better than the already saved one, when feature flag is enabled.
-func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
 	attestedRoot := cfg.roblock.Block().ParentRoot()
 	attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
 	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
+		return
 	}
 	if attestedBlock == nil || attestedBlock.IsNil() {
-		return errors.New("attested block is nil")
+		log.Error("processLightClientUpdates: Could not get attested block")
+		return
 	}
 	attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
 	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
+		return
 	}
 	if attestedState == nil || attestedState.IsNil() {
-		return errors.New("attested state is nil")
+		log.Error("processLightClientUpdates: Could not get attested state")
+		return
 	}

 	finalizedRoot := attestedState.FinalizedCheckpoint().Root
@@ -165,98 +158,17 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) {
 	if err != nil {
 		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
 			log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
-			return nil
+			return
 		}
-		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
+		log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
+		return
 	}

-	update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
+	err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
 	if err != nil {
-		return errors.Wrapf(err, "could not create light client update")
+		log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
 	}
-
-	period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
-
-	return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
-}
-
-func (s *Service) processLightClientFinalityUpdate(
-	ctx context.Context,
-	signed interfaces.ReadOnlySignedBeaconBlock,
-	postState state.BeaconState,
-) error {
-	attestedRoot := signed.Block().ParentRoot()
-	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-	}
-	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-	}
-
-	finalizedCheckpoint := attestedState.FinalizedCheckpoint()
-
-	if finalizedCheckpoint == nil {
-		return nil
-	}
-
-	finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
-	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
-	if err != nil {
-		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
-			log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
-			return nil
-		}
-		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
-	}
-
-	newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)
-
-	if err != nil {
-		return errors.Wrap(err, "could not create light client finality update")
-	}
-
-	if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
-		log.Debug("Skip saving light client finality update: current update is better")
-		return nil
-	}
-
-	s.lcStore.SetLastFinalityUpdate(newUpdate, true)
-
-	return nil
-}
-
-func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
-	postState state.BeaconState) error {
-	attestedRoot := signed.Block().ParentRoot()
-	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-	}
-	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-	if err != nil {
-		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-	}
-
-	newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)
-
-	if err != nil {
-		if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
-			log.WithError(err).Debug("Skipping processing light client optimistic update")
-			return nil
-		}
-		return errors.Wrap(err, "could not create light client optimistic update")
-	}
-
-	if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
-		log.Debug("Skip saving light client optimistic update: current update is better")
-		return nil
-	}
-
-	s.lcStore.SetLastOptimisticUpdate(newUpdate, true)
-
-	return nil
+	log.Debug("Processed light client updates")
 }

 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch

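The period used with SaveLightClientUpdate above comes from slots.SyncCommitteePeriod: period = epoch / EPOCHS_PER_SYNC_COMMITTEE_PERIOD, with 256 epochs per period and 32 slots per epoch on mainnet. A quick worked sketch:

	// Sketch with mainnet constants: slot → epoch → sync committee period.
	const (
		slotsPerEpoch                = 32
		epochsPerSyncCommitteePeriod = 256
	)

	func syncCommitteePeriod(slot uint64) uint64 {
		return (slot / slotsPerEpoch) / epochsPerSyncCommitteePeriod
	}
	// e.g. slot 8192 → epoch 256 → period 1
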
@@ -454,6 +366,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
 // This is useful for block tree visualizer and additional vote accounting.
 func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
+	if fCheckpoint.Epoch > jCheckpoint.Epoch {
+		return ErrInvalidCheckpointArgs
+	}
 	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

 	// Fork choice only matters from last finalized slot.
@@ -462,15 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
 	if err != nil {
 		return err
 	}
-	// The first block can have a bogus root since the block is not inserted in forkchoice
-	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
-	if err != nil {
-		return err
-	}
-	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
-		JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
+	root := signed.Block().ParentRoot()
 	// As long as parent node is not in fork choice store, and parent node is in DB.
-	root := roblock.Block().ParentRoot()
 	for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
 		b, err := s.getBlock(ctx, root)
 		if err != nil {
@@ -489,12 +397,13 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
 			FinalizedCheckpoint: fCheckpoint}
 		pendingNodes = append(pendingNodes, args)
 	}
-	if len(pendingNodes) == 1 {
+	if len(pendingNodes) == 0 {
 		return nil
 	}
 	if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
 		return ErrNotDescendantOfFinalized
 	}
+	slices.Reverse(pendingNodes)
 	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
 }

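With the new guard in place, callers must pass the finalized checkpoint before the justified one. A call-site sketch mirroring the onBlockBatch fix earlier in this diff:

	// Correct order: finalized first, then justified.
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
	}
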
@@ -12,7 +12,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -24,6 +23,7 @@ import (
 	mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
 	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -375,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
 	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
 }

+func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
+	tests := []struct {
+		name           string
+		finalizedEpoch primitives.Epoch
+		justifiedEpoch primitives.Epoch
+		expectedError  error
+	}{
+		{
+			name:           "finalized epoch greater than justified epoch",
+			finalizedEpoch: 5,
+			justifiedEpoch: 3,
+			expectedError:  ErrInvalidCheckpointArgs,
+		},
+		{
+			name:           "valid case - finalized equal to justified",
+			finalizedEpoch: 3,
+			justifiedEpoch: 3,
+			expectedError:  nil,
+		},
+		{
+			name:           "valid case - finalized less than justified",
+			finalizedEpoch: 2,
+			justifiedEpoch: 3,
+			expectedError:  nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			service, tr := minimalTestService(t)
+			ctx, beaconDB := tr.ctx, tr.db
+
+			st, _ := util.DeterministicGenesisState(t, 64)
+			require.NoError(t, service.saveGenesisData(ctx, st))
+
+			// Create a simple block for testing
+			blk := util.NewBeaconBlock()
+			blk.Block.Slot = 10
+			blk.Block.ParentRoot = service.originBlockRoot[:]
+			wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
+			require.NoError(t, err)
+			util.SaveBlock(t, ctx, beaconDB, blk)
+
+			// Create checkpoints with test case epochs
+			finalizedCheckpoint := &ethpb.Checkpoint{
+				Epoch: tt.finalizedEpoch,
+				Root:  service.originBlockRoot[:],
+			}
+			justifiedCheckpoint := &ethpb.Checkpoint{
+				Epoch: tt.justifiedEpoch,
+				Root:  service.originBlockRoot[:],
+			}
+
+			// Set up forkchoice store to avoid other errors
+			fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
+			state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
+			require.NoError(t, err)
+			require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+
+			err = service.fillInForkChoiceMissingBlocks(
+				t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
+
+			if tt.expectedError != nil {
+				require.ErrorIs(t, err, tt.expectedError)
+			} else {
+				// For valid cases, we might get other errors (like block not being descendant of finalized)
+				// but we shouldn't get the checkpoint validation error
+				if err != nil && errors.Is(err, tt.expectedError) {
+					t.Errorf("Unexpected checkpoint validation error: %v", err)
+				}
+			}
+		})
+	}
+}
+
 // blockTree1 constructs the following tree:
 //
 //	/- B1
@@ -2132,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {

 	// Forkchoice has the genesisRoot loaded at startup
 	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
-	// Service's store has the finalized state as headRoot
+	// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
-	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
+	require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, false, optimistic)
+	require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint

 	// Check that the node's justified checkpoint does not agree with the
 	// last valid state's justified checkpoint
@@ -2720,6 +2795,11 @@ func TestProcessLightClientUpdate(t *testing.T) {
 	s, tr := minimalTestService(t, WithLCStore())
 	ctx := tr.ctx

+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+	require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
 	for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
 		t.Run(version.String(testVersion), func(t *testing.T) {
 			l := util.NewTestLightClient(t, testVersion)
@@ -2742,6 +2822,8 @@ func TestProcessLightClientUpdate(t *testing.T) {
 			require.NoError(t, err)
 			err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
 			require.NoError(t, err)
+			err = s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, currentBlockRoot)
+			require.NoError(t, err)

 			err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
 			require.NoError(t, err)
@@ -2756,10 +2838,9 @@ func TestProcessLightClientUpdate(t *testing.T) {
 			period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 			t.Run("no old update", func(t *testing.T) {
-				require.NoError(t, s.processLightClientUpdate(cfg))
-
+				s.processLightClientUpdates(cfg)
 				// Check that the light client update is saved
-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2773,12 +2854,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
 				oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 				require.NoError(t, err)

-				err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+				err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
 				require.NoError(t, err)

-				require.NoError(t, s.processLightClientUpdate(cfg))
+				s.processLightClientUpdates(cfg)

-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2802,12 +2883,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
 					SyncCommitteeSignature: make([]byte, 96),
 				})

-				err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+				err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
 				require.NoError(t, err)

-				require.NoError(t, s.processLightClientUpdate(cfg))
+				s.processLightClientUpdates(cfg)

-				u, err := s.lcStore.LightClientUpdate(ctx, period)
+				u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
 				require.NoError(t, err)
 				require.NotNil(t, u)
 				require.DeepEqual(t, oldUpdate, u)
@@ -2877,14 +2958,18 @@ func TestIsDataAvailable(t *testing.T) {
 		params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

 	t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
 		ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -2902,7 +2987,9 @@ func TestIsDataAvailable(t *testing.T) {

 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -2914,7 +3001,9 @@ func TestIsDataAvailable(t *testing.T) {

 		ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -2962,7 +3051,9 @@ func TestIsDataAvailable(t *testing.T) {
 		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
 		defer cancel()

-		err = service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -3024,7 +3115,9 @@ func TestIsDataAvailable(t *testing.T) {
 		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
 		defer cancel()

-		err = service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NoError(t, err)
 	})

@@ -3043,7 +3136,9 @@ func TestIsDataAvailable(t *testing.T) {
 			cancel()
 		}()

-		err := service.isDataAvailable(ctx, root, signed)
+		roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+		require.NoError(t, err)
+		err = service.isDataAvailable(ctx, roBlock)
 		require.NotNil(t, err)
 	})
 }
@@ -3115,6 +3210,11 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 	s.cfg.P2P = &mockp2p.FakeP2P{}
 	ctx := tr.ctx

+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+	require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
 	testCases := []struct {
 		name       string
 		oldOptions []util.LightClientOption
@@ -3130,7 +3230,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 		{
 			name:       "Same age",
 			oldOptions: []util.LightClientOption{},
-			newOptions: []util.LightClientOption{util.WithSupermajority()}, // supermajority does not matter here and is only added to result in two different updates
+			newOptions: []util.LightClientOption{util.WithSupermajority(0)}, // supermajority does not matter here and is only added to result in two different updates
 			expectReplace: false,
 		},
 		{
@@ -3174,14 +3274,14 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

 		t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
 			s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-			s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+			s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)

 			var oldActualUpdate interfaces.LightClientOptimisticUpdate
 			var err error
 			if tc.oldOptions != nil {
 				// config for old update
 				lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
-				require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
+				s.processLightClientUpdates(cfgOld)

 				oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
 				require.NoError(t, err)
@@ -3195,7 +3295,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

 			// config for new update
 			lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
-			require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
+			s.processLightClientUpdates(cfgNew)

 			newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
 			require.NoError(t, err)
@@ -3236,6 +3336,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
 	s, tr := minimalTestService(t)
 	s.cfg.P2P = &mockp2p.FakeP2P{}
 	ctx := tr.ctx
+	s.head = &head{}

 	testCases := []struct {
 		name string
@@ -3314,15 +3415,18 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {

 		t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
 			s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-			s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+			s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)

 			var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
 			var err error

 			if tc.oldOptions != nil {
 				// config for old update
 				lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
-				require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
+				blkRoot, err := lOld.Block.Block().HashTreeRoot()
+				require.NoError(t, err)
+				s.head.block = lOld.Block
+				s.head.root = blkRoot
+				s.processLightClientUpdates(cfgOld)

 				// check that the old update is saved
 				actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
@@ -3333,7 +3437,11 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {

 			// config for new update
 			lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
-			require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
+			blkRoot, err := lNew.Block.Block().HashTreeRoot()
+			require.NoError(t, err)
+			s.head.block = lNew.Block
+			s.head.root = blkRoot
+			s.processLightClientUpdates(cfgNew)

 			// check that the actual old update and the actual new update are different
 			actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)

@@ -16,7 +16,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v6/config/features"
-	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -93,7 +92,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

 	blockCopy, err := block.Copy()
 	if err != nil {
-		return err
+		return errors.Wrap(err, "block copy")
 	}

 	preState, err := s.getBlockPreState(ctx, blockCopy.Block())
@@ -104,17 +103,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	currentCheckpoints := s.saveCurrentCheckpoints(preState)
 	roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "new ro block with root")
 	}

 	postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "validator execution and consensus")
 	}

-	daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
+	daWaitedTime, err := s.handleDA(ctx, avs, roblock)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "handle da")
 	}

 	// Defragment the state before continuing block processing.
@@ -135,10 +134,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	if err := s.postBlockProcess(args); err != nil {
 		err := errors.Wrap(err, "could not process block")
 		tracing.AnnotateError(span, err)
-		return err
+		return errors.Wrap(err, "post block process")
 	}
 	if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
-		return err
+		return errors.Wrap(err, "update checkpoints")
 	}
 	// If slasher is configured, forward the attestations in the block via an event feed for processing.
 	if s.slasherEnabled {
@@ -152,12 +151,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

 	// Have we been finalizing? Should we start saving hot states to db?
 	if err := s.checkSaveHotStateDB(ctx); err != nil {
-		return err
+		return errors.Wrap(err, "check save hot state db")
 	}

 	// We apply the same heuristic to some of our more important caches.
 	if err := s.handleCaches(); err != nil {
-		return err
+		return errors.Wrap(err, "handle caches")
 	}
 	s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
 	return nil
@@ -240,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
 	return postState, isValidPayload, nil
 }

-func (s *Service) handleDA(
-	ctx context.Context,
-	block interfaces.SignedBeaconBlock,
-	blockRoot [fieldparams.RootLength]byte,
-	avs das.AvailabilityStore,
-) (elapsed time.Duration, err error) {
-	defer func(start time.Time) {
-		elapsed = time.Since(start)
-
-		if err == nil {
-			dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
-		}
-	}(time.Now())
-
-	if avs == nil {
-		if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
-			return
-		}
-
-		return
+func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
+	var err error
+	start := time.Now()
+	if avs != nil {
+		err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
+	} else {
+		err = s.isDataAvailable(ctx, block)
 	}

-	var rob blocks.ROBlock
-	rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
-	if err != nil {
-		return
+	elapsed := time.Since(start)
+	if err == nil {
+		dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
 	}

-	err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
-
-	return
+	return elapsed, err
 }

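The rewritten handleDA drops the named-return/defer idiom in favor of straight-line timing, which keeps the metric observation visibly tied to the success path. The general shape, as a standalone sketch (names are illustrative):

	// Sketch: time a fallible operation, recording the metric only on success.
	func timed(op func() error, observeMs func(float64)) (time.Duration, error) {
		start := time.Now()
		err := op()
		elapsed := time.Since(start)
		if err == nil {
			observeMs(float64(elapsed.Milliseconds()))
		}
		return elapsed, err
	}
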
func (s *Service) reportPostBlockProcessing(
@@ -320,13 +301,28 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
 	if features.Get().EnableLightClient {
 		// Save a light client bootstrap for the finalized checkpoint
 		go func() {
-			err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
+			st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
+			if err != nil {
+				log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
+				return
+			}
+			err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
 			if err != nil {
 				log.WithError(err).Error("Could not save light client bootstrap by block root")
 			} else {
 				log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
 			}
 		}()
+
+		// Clean up the light client store caches
+		go func() {
+			err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
+			if err != nil {
+				log.WithError(err).Error("Could not migrate light client store to cold storage")
+			} else {
+				log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
+			}
+		}()
 	}
 }

@@ -9,9 +9,9 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -192,7 +192,9 @@ func TestHandleDA(t *testing.T) {
 	require.NoError(t, err)

 	s, _ := minimalTestService(t)
-	elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
+	block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
+	require.NoError(t, err)
+	elapsed, err := s.handleDA(t.Context(), nil, block)
 	require.NoError(t, err)
 	require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
 }
@@ -594,11 +596,7 @@ func TestProcessLightClientBootstrap(t *testing.T) {

 	require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))

-	sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
-	require.NoError(t, err)
-	require.NotNil(t, sss)
-
-	s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
+	s.executePostFinalizationTasks(s.ctx, l.AttestedState)

 	// wait for the goroutine to finish processing
 	time.Sleep(1 * time.Second)

@@ -14,13 +14,13 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
 	f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"slices"

 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -20,7 +21,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
 		return errors.Wrap(err, "could not set up forkchoice checkpoints")
 	}
 	if err := s.setupForkchoiceTree(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice root")
+		return errors.Wrap(err, "could not set up forkchoice tree")
 	}
 	if err := s.initializeHead(s.ctx, st); err != nil {
 		return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +31,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {

 func (s *Service) startupHeadRoot() [32]byte {
 	headStr := features.Get().ForceHead
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
+	jp := s.CurrentJustifiedCheckpt()
+	jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
 	if headStr == "" {
-		return fRoot
+		return jRoot
 	}
 	if headStr == "head" {
 		root, err := s.cfg.BeaconDB.HeadBlockRoot()
 		if err != nil {
-			log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
-			return fRoot
+			log.WithError(err).Error("Could not get head block root, starting with justified block as head")
+			return jRoot
 		}
 		log.Infof("Using Head root of %#x", root)
 		return root
 	}
 	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
 	if err != nil {
-		log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
-		return fRoot
+		log.WithError(err).Error("Could not parse head root, starting with justified block as head")
+		return jRoot
 	}
 	return [32]byte(root)
 }
@@ -112,6 +113,7 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
 			return nil, errors.New("head block is not a descendant of the finalized checkpoint")
 		}
 	}
+	slices.Reverse(chain)
 	return chain, nil
 }

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
 		})
 		defer resetCfg()
 		require.Equal(t, service.startupHeadRoot(), gr)
-		require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
+		require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
 	})

 	st, _ := util.DeterministicGenesisState(t, 64)
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
 	require.NotEqual(t, fRoot, root)
 	require.Equal(t, root, service.startupHeadRoot())
 	require.NoError(t, service.setupForkchoiceTree(st))
-	require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
+	require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
 }

@@ -11,21 +11,21 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
 	mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
+	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
 	p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 	return nil
 }

-func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
 	mb.broadcastCalled = true
 	return nil
 }

@@ -723,7 +723,8 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
 }

 // ReceiveDataColumns implements the same method in chain service
-func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
+func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
+	c.DataColumns = append(c.DataColumns, dcs...)
 	return nil
 }

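startupHeadRoot now falls back to the justified checkpoint root rather than the finalized one in every branch. The three-way resolution of the force-head feature flag, restated as a self-contained sketch (the flag semantics are as shown above; the helper itself is hypothetical, with the lookups stubbed out and "encoding/hex" and "strings" imported):

	// Sketch: "" → justified root; "head" → stored head root, else justified;
	// otherwise a 32-byte hex root, else justified.
	func resolveStartupHead(forceHead string, jRoot [32]byte, headFromDB func() ([32]byte, error)) [32]byte {
		switch forceHead {
		case "":
			return jRoot
		case "head":
			if root, err := headFromDB(); err == nil {
				return root
			}
			return jRoot
		default:
			b, err := hex.DecodeString(strings.TrimPrefix(forceHead, "0x"))
			if err != nil || len(b) != 32 {
				return jRoot
			}
			var root [32]byte
			copy(root[:], b)
			return root
		}
	}
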
beacon-chain/cache/sync_committee.go (vendored) — 60 changes
@@ -67,6 +67,30 @@ func (s *SyncCommitteeCache) Clear() {
 	s.cache = cache.NewFIFO(keyFn)
 }

+// CurrentPeriodPositions returns current period positions of validator indices with respect with
+// sync committee. If any input validator index has no assignment, an empty list will be returned
+// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
+// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
+func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	pos, err := s.positionsInCommittee(root, indices)
+	if err != nil {
+		return nil, err
+	}
+	result := make([][]primitives.CommitteeIndex, len(pos))
+	for i, p := range pos {
+		if p == nil {
+			result[i] = []primitives.CommitteeIndex{}
+		} else {
+			result[i] = p.currentPeriod
+		}
+	}
+
+	return result, nil
+}
+
 // CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
 // sync committee. If the input validator index has no assignment, an empty list will be returned.
 // If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
@@ -104,11 +128,7 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
 	return pos.nextPeriod, nil
 }

-// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
-// of validator index to its index(s) position in the sync committee.
-func (s *SyncCommitteeCache) idxPositionInCommittee(
-	root [32]byte, valIdx primitives.ValidatorIndex,
-) (*positionInCommittee, error) {
+func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
 	obj, exists, err := s.cache.GetByKey(key(root))
 	if err != nil {
 		return nil, err
@@ -121,13 +141,33 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
 	if !ok {
 		return nil, errNotSyncCommitteeIndexPosition
 	}
-	idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
-	if !ok {
-		SyncCommitteeCacheMiss.Inc()
+	result := make([]*positionInCommittee, len(indices))
+	for i, idx := range indices {
+		idxInCommittee, ok := item.vIndexToPositionMap[idx]
+		if ok {
+			SyncCommitteeCacheHit.Inc()
+			result[i] = idxInCommittee
+		} else {
+			SyncCommitteeCacheMiss.Inc()
+			result[i] = nil
+		}
+	}
+	return result, nil
+}
+
+// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
+// of validator index to its index(s) position in the sync committee.
+func (s *SyncCommitteeCache) idxPositionInCommittee(
+	root [32]byte, valIdx primitives.ValidatorIndex,
+) (*positionInCommittee, error) {
+	positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
+	if err != nil {
+		return nil, err
+	}
+	if len(positions) == 0 {
 		return nil, nil
 	}
-	SyncCommitteeCacheHit.Inc()
-	return idxInCommittee, nil
+	return positions[0], nil
 }

 // UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to

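A usage sketch for the new batch lookup — the cache instance, state root, and indices below are assumed names, not taken from this diff:

	positions, err := syncCommitteeCache.CurrentPeriodPositions(stateRoot, []primitives.ValidatorIndex{10, 42, 99})
	if err != nil {
		// e.g. ErrNonExistingSyncCommitteeKey: fall back to reading positions from the state.
		return err
	}
	for i, pos := range positions {
		if len(pos) == 0 {
			continue // validator i has no current-period assignment
		}
		// pos holds that validator's index positions within the sync committee.
	}
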
@@ -16,6 +16,11 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
 	return &FakeSyncCommitteeCache{}
 }

+// CurrentPeriodPositions -- fake
+func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+	return nil, nil
+}
+
 // CurrentEpochIndexPosition -- fake.
 func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
 	return nil, nil

@@ -5,6 +5,7 @@ import (
 	"sort"

 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/container/slice"
@@ -39,11 +40,11 @@ func ProcessAttesterSlashings(
 	ctx context.Context,
 	beaconState state.BeaconState,
 	slashings []ethpb.AttSlashing,
-	slashFunc slashValidatorFunc,
+	exitInfo *validators.ExitInfo,
 ) (state.BeaconState, error) {
 	var err error
 	for _, slashing := range slashings {
-		beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
+		beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
 		if err != nil {
 			return nil, err
 		}
@@ -56,7 +57,7 @@ func ProcessAttesterSlashing(
 	ctx context.Context,
 	beaconState state.BeaconState,
 	slashing ethpb.AttSlashing,
-	slashFunc slashValidatorFunc,
+	exitInfo *validators.ExitInfo,
 ) (state.BeaconState, error) {
 	if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
 		return nil, errors.Wrap(err, "could not verify attester slashing")
@@ -75,10 +76,9 @@ func ProcessAttesterSlashing(
 		return nil, err
 	}
 	if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
-		beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
+		beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
 		if err != nil {
-			return nil, errors.Wrapf(err, "could not slash validator index %d",
-				validatorIndex)
+			return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
 		}
 		slashedAny = true
 	}

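Threading one *validators.ExitInfo through the whole batch lets consecutive slashings share churn accounting: the exit epoch assigned to each slashed validator depends on the balance already scheduled to exit. Illustrative arithmetic only (not the spec function): with a per-epoch activation-exit churn limit C and four slashed validators of effective balance C/3 each, the cumulative 4C/3 exceeds C, so the computed exit epoch advances — the situation TestProcessAttesterSlashing_ExitEpochGetsUpdated constructs below.

	// Rough sketch of the spill-over idea only; the real logic lives in
	// validators.SlashValidator / validators.ExitInformation.
	func epochsNeeded(totalExitingGwei, churnPerEpochGwei uint64) uint64 {
		return (totalExitingGwei + churnPerEpochGwei - 1) / churnPerEpochGwei // ceiling division
	}
	// epochsNeeded(4*C/3, C) == 2: the exits cannot all fit in a single epoch.
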
@@ -4,6 +4,7 @@ import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -44,11 +45,10 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
            Target: &ethpb.Checkpoint{Epoch: 1}},
    })}}

-   var registry []*ethpb.Validator
    currentSlot := primitives.Slot(0)

    beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
-       Validators: registry,
+       Validators: []*ethpb.Validator{{}},
        Slot:       currentSlot,
    })
    require.NoError(t, err)
@@ -62,16 +62,15 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
-   _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
+   _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    assert.ErrorContains(t, "attestations are not slashable", err)
}

func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
-   var registry []*ethpb.Validator
    currentSlot := primitives.Slot(0)

    beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
-       Validators: registry,
+       Validators: []*ethpb.Validator{{}},
        Slot:       currentSlot,
    })
    require.NoError(t, err)
@@ -101,7 +100,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
-   _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
+   _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}
@@ -243,7 +242,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
    currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
    require.NoError(t, tc.st.SetSlot(currentSlot))

-   newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
+   newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
    require.NoError(t, err)
    newRegistry := newState.Validators()

@@ -265,3 +264,83 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
        })
    }
}

func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
    st, keys := util.DeterministicGenesisStateElectra(t, 8)
    bal, err := helpers.TotalActiveBalance(st)
    require.NoError(t, err)
    perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
    vals := st.Validators()

    // We set the total effective balance of slashed validators
    // higher than the churn limit for a single epoch.
    vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
    require.NoError(t, st.SetValidators(vals))

    sl1att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        Data: &ethpb.AttestationData{
            Source: &ethpb.Checkpoint{Epoch: 1},
        },
        AttestingIndices: []uint64{0, 1},
    })
    sl1att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        AttestingIndices: []uint64{0, 1},
    })
    slashing1 := &ethpb.AttesterSlashingElectra{
        Attestation_1: sl1att1,
        Attestation_2: sl1att2,
    }
    sl2att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        Data: &ethpb.AttestationData{
            Source: &ethpb.Checkpoint{Epoch: 1},
        },
        AttestingIndices: []uint64{2, 3},
    })
    sl2att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        AttestingIndices: []uint64{2, 3},
    })
    slashing2 := &ethpb.AttesterSlashingElectra{
        Attestation_1: sl2att1,
        Attestation_2: sl2att2,
    }

    domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
    require.NoError(t, err)

    signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 := keys[0].Sign(signingRoot[:])
    sig1 := keys[1].Sign(signingRoot[:])
    aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl1att1.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[0].Sign(signingRoot[:])
    sig1 = keys[1].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl1att2.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[2].Sign(signingRoot[:])
    sig1 = keys[3].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl2att1.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[2].Sign(signingRoot[:])
    sig1 = keys[3].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl2att2.Signature = aggregateSig.Marshal()

    exitInfo := v.ExitInformation(st)
    assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
    _, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
    require.NoError(t, err)
    assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
}
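The expected HighestExitEpoch of 6 follows from spec arithmetic: at epoch 0 the earliest exit epoch is compute_activation_exit_epoch(0) = 0 + 1 + MAX_SEED_LOOKAHEAD = 5, and the four slashed validators together carry roughly 4/3 of the per-epoch activation-exit churn, so the balance that does not fit in epoch 5 spills into epoch 6. Below is a self-contained sketch of that balance-based scheduling, a simplified take on the spec's exit churn rather than Prysm's code.

package main

import "fmt"

const maxSeedLookahead = 4 // MAX_SEED_LOOKAHEAD

// exitQueue tracks how much balance is already scheduled per exit epoch,
// mimicking the spec's balance-based churn in simplified form.
type exitQueue struct {
    epoch    uint64 // earliest available exit epoch
    consumed uint64 // balance already scheduled in that epoch
    perEpoch uint64 // activation-exit churn limit per epoch
}

// schedule places one validator's balance in the queue and returns the
// epoch its exit lands in, rolling over when an epoch is full.
func (q *exitQueue) schedule(balance uint64) uint64 {
    for balance > 0 {
        room := q.perEpoch - q.consumed
        if balance <= room {
            q.consumed += balance
            return q.epoch
        }
        balance -= room
        q.epoch++
        q.consumed = 0
    }
    return q.epoch
}

func main() {
    churn := uint64(96) // hypothetical per-epoch churn, arbitrary units
    q := &exitQueue{epoch: 0 + 1 + maxSeedLookahead, perEpoch: churn} // earliest exit epoch: 5
    var last uint64
    for i := 0; i < 4; i++ {
        last = q.schedule(churn / 3) // four validators at churn/3 each
    }
    fmt.Println(last) // 6: the fourth exit no longer fits in epoch 5
}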
@@ -191,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
    fuzzer.Fuzz(p)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
-   r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
+   r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
    }
@@ -224,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
    fuzzer.Fuzz(a)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
-   r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
+   r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
    }
@@ -334,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
    fuzzer.Fuzz(e)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
-   r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
+   r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
    }
@@ -351,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
    fuzzer.Fuzz(e)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
-   r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
+   r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
    }
@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
-   newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
+   newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    require.NoError(t, err)
    newRegistry := newState.Validators()
    if !newRegistry[expectedSlashedVal].Slashed {
@@ -9,7 +9,6 @@ import (
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -50,13 +49,12 @@ func ProcessVoluntaryExits(
    ctx context.Context,
    beaconState state.BeaconState,
    exits []*ethpb.SignedVoluntaryExit,
    exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
    // Avoid calculating the epoch churn if no exits exist.
    if len(exits) == 0 {
        return beaconState, nil
    }
-   maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
-   var exitEpoch primitives.Epoch
    for idx, exit := range exits {
        if exit == nil || exit.Exit == nil {
            return nil, errors.New("nil voluntary exit in block body")
@@ -68,15 +66,8 @@ func ProcessVoluntaryExits(
        if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
            return nil, errors.Wrapf(err, "could not verify exit %d", idx)
        }
-       beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
-       if err == nil {
-           if exitEpoch > maxExitEpoch {
-               maxExitEpoch = exitEpoch
-               churn = 1
-           } else if exitEpoch == maxExitEpoch {
-               churn++
-           }
-       } else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
+       beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
+       if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
            return nil, err
        }
    }
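The rewritten loop no longer tracks maxExitEpoch and churn locally; it delegates that bookkeeping to the shared exitInfo and keeps only the error-tolerance rule: the already-exited sentinel is benign, anything else aborts block processing. A toy version of that rule, with stand-in names:

package main

import (
    "errors"
    "fmt"
)

// errAlreadyExited stands in for validators.ErrValidatorAlreadyExited:
// a sentinel that callers treat as "nothing to do" rather than a failure.
var errAlreadyExited = errors.New("validator already exited")

func initiateExit(idx int) error {
    if idx == 1 {
        return errAlreadyExited // duplicate exit: benign
    }
    if idx == 2 {
        return errors.New("bad signature") // real failure
    }
    return nil
}

func processExits(indices []int) error {
    for _, idx := range indices {
        // Tolerate the sentinel, propagate everything else: the same
        // shape the new ProcessVoluntaryExits loop uses.
        if err := initiateExit(idx); err != nil && !errors.Is(err, errAlreadyExited) {
            return fmt.Errorf("exit %d: %w", idx, err)
        }
    }
    return nil
}

func main() {
    fmt.Println(processExits([]int{0, 1})) // <nil>: duplicate exit is skipped
    fmt.Println(processExits([]int{0, 2})) // exit 2: bad signature
}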
@@ -7,6 +7,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v6/config/params"
@@ -46,7 +47,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
    }

    want := "validator has not been active long enough to exit"
-   _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
+   _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    assert.ErrorContains(t, want, err)
}

@@ -76,7 +77,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
    }

    want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
-   _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
+   _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    assert.ErrorContains(t, want, err)
}

@@ -124,7 +125,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
        },
    }

-   newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
+   newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    require.NoError(t, err, "Could not process exits")
    newRegistry := newState.Validators()
    if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {
@@ -184,45 +184,54 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
        })
    case *ethpb.BeaconStateElectra:
        return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
-           Block: &ethpb.BeaconBlockElectra{
-               ParentRoot: params.BeaconConfig().ZeroHash[:],
-               StateRoot:  root[:],
-               Body: &ethpb.BeaconBlockBodyElectra{
-                   RandaoReveal: make([]byte, 96),
-                   Eth1Data: &ethpb.Eth1Data{
-                       DepositRoot: make([]byte, 32),
-                       BlockHash:   make([]byte, 32),
-                   },
-                   Graffiti: make([]byte, 32),
-                   SyncAggregate: &ethpb.SyncAggregate{
-                       SyncCommitteeBits:      make([]byte, fieldparams.SyncCommitteeLength/8),
-                       SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
-                   },
-                   ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
-                       ParentHash:    make([]byte, 32),
-                       FeeRecipient:  make([]byte, 20),
-                       StateRoot:     make([]byte, 32),
-                       ReceiptsRoot:  make([]byte, 32),
-                       LogsBloom:     make([]byte, 256),
-                       PrevRandao:    make([]byte, 32),
-                       ExtraData:     make([]byte, 0),
-                       BaseFeePerGas: make([]byte, 32),
-                       BlockHash:     make([]byte, 32),
-                       Transactions:  make([][]byte, 0),
-                       Withdrawals:   make([]*enginev1.Withdrawal, 0),
-                   },
-                   BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
-                   BlobKzgCommitments:    make([][]byte, 0),
-                   ExecutionRequests: &enginev1.ExecutionRequests{
-                       Withdrawals:    make([]*enginev1.WithdrawalRequest, 0),
-                       Deposits:       make([]*enginev1.DepositRequest, 0),
-                       Consolidations: make([]*enginev1.ConsolidationRequest, 0),
-                   },
-               },
-           },
+           Block:     electraGenesisBlock(root),
            Signature: params.BeaconConfig().EmptySignature[:],
        })
    case *ethpb.BeaconStateFulu:
        return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{
            Block:     electraGenesisBlock(root),
            Signature: params.BeaconConfig().EmptySignature[:],
        })
    default:
        return nil, ErrUnrecognizedState
    }
}

func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
    return &ethpb.BeaconBlockElectra{
        ParentRoot: params.BeaconConfig().ZeroHash[:],
        StateRoot:  root[:],
        Body: &ethpb.BeaconBlockBodyElectra{
            RandaoReveal: make([]byte, 96),
            Eth1Data: &ethpb.Eth1Data{
                DepositRoot: make([]byte, 32),
                BlockHash:   make([]byte, 32),
            },
            Graffiti: make([]byte, 32),
            SyncAggregate: &ethpb.SyncAggregate{
                SyncCommitteeBits:      make([]byte, fieldparams.SyncCommitteeLength/8),
                SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
            },
            ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
                ParentHash:    make([]byte, 32),
                FeeRecipient:  make([]byte, 20),
                StateRoot:     make([]byte, 32),
                ReceiptsRoot:  make([]byte, 32),
                LogsBloom:     make([]byte, 256),
                PrevRandao:    make([]byte, 32),
                ExtraData:     make([]byte, 0),
                BaseFeePerGas: make([]byte, 32),
                BlockHash:     make([]byte, 32),
                Transactions:  make([][]byte, 0),
                Withdrawals:   make([]*enginev1.Withdrawal, 0),
            },
            BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
            BlobKzgCommitments:    make([][]byte, 0),
            ExecutionRequests: &enginev1.ExecutionRequests{
                Withdrawals:    make([]*enginev1.WithdrawalRequest, 0),
                Deposits:       make([]*enginev1.DepositRequest, 0),
                Consolidations: make([]*enginev1.ConsolidationRequest, 0),
            },
        },
    }
}
@@ -7,9 +7,9 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
@@ -19,11 +19,6 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")

-type slashValidatorFunc func(
-   ctx context.Context,
-   st state.BeaconState,
-   vid primitives.ValidatorIndex) (state.BeaconState, error)

// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
@@ -54,11 +49,11 @@ func ProcessProposerSlashings(
    ctx context.Context,
    beaconState state.BeaconState,
    slashings []*ethpb.ProposerSlashing,
-   slashFunc slashValidatorFunc,
+   exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    var err error
    for _, slashing := range slashings {
-       beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
+       beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
        if err != nil {
            return nil, err
        }
@@ -71,7 +66,7 @@ func ProcessProposerSlashing(
    ctx context.Context,
    beaconState state.BeaconState,
    slashing *ethpb.ProposerSlashing,
-   slashFunc slashValidatorFunc,
+   exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    var err error
    if slashing == nil {
@@ -80,7 +75,7 @@ func ProcessProposerSlashing(
    if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
        return nil, errors.Wrap(err, "could not verify proposer slashing")
    }
-   beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
+   beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
    if err != nil {
        return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
    }
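Dropping the slashValidatorFunc type removes one level of indirection: callers previously injected the slashing implementation as a function value, whereas the new code calls validators.SlashValidator directly and passes the shared accumulator instead. The two shapes contrasted in miniature (toy types, not Prysm's API):

package main

import "fmt"

type state struct{ slashed []int }

// Before: behavior injected through a function parameter.
type slashFunc func(st *state, idx int) *state

func processWithFunc(st *state, idx int, slash slashFunc) *state {
    return slash(st, idx)
}

// After: a direct call that takes the shared accumulator instead,
// mirroring the move from slashFunc to *validators.ExitInfo.
type exitInfo struct{ highestExitEpoch uint64 }

func slashValidator(st *state, idx int, info *exitInfo) *state {
    st.slashed = append(st.slashed, idx)
    info.highestExitEpoch++ // placeholder for real exit-queue bookkeeping
    return st
}

func main() {
    st := &state{}
    st = processWithFunc(st, 1, func(s *state, i int) *state {
        s.slashed = append(s.slashed, i)
        return s
    })
    st = slashValidator(st, 2, &exitInfo{})
    fmt.Println(st.slashed) // [1 2]
}

The direct form trades flexibility for clarity: every caller now shares one slashing path, and the cross-operation state travels in the accumulator rather than being hidden behind the injected closure.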
@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
        },
    }
    want := "mismatched header slots"
-   _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
+   _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
        },
    }
    want := "expected slashing headers to differ"
-   _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
+   _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
        "validator with key %#x is not slashable",
        bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
    )
-   _, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
+   _, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

-   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
+   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()
@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

-   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
+   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()
@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

-   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
+   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()
@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

-   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
+   newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()
@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
    // Handle validator ejections.
    for _, idx := range eligibleForEjection {
        var err error
-       // exitQueueEpoch and churn arguments are not used in electra.
-       st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
+       // exit info is not used in electra
+       st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
        if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
            return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
        }
@@ -4,6 +4,7 @@ import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -46,18 +47,21 @@ var (
    // # [New in Electra:EIP7251]
    // for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

-func ProcessOperations(
-   ctx context.Context,
-   st state.BeaconState,
-   block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
+func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    // 6110 validations are in VerifyOperationLengths
    bb := block.Body()
    // Electra extends the altair operations.
-   st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
+   exitInfo := v.ExitInformation(st)
+   if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
+       return nil, errors.Wrap(err, "could not update total active balance cache")
+   }
+   st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
    }
-   st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
+   st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attester slashing")
    }
@@ -68,7 +72,7 @@ func ProcessOperations(
    if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
        return nil, errors.Wrap(err, "could not process altair deposit")
    }
-   st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
+   st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
    }
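The wiring above is the heart of the change: ExitInformation is computed once per block, the total active balance it already holds primes the balance cache, and the same accumulator is handed to proposer slashings, attester slashings, and voluntary exits in turn. A compact sketch of that orchestration with illustrative names only:

package main

import "fmt"

// exitInfo carries the per-block exit state shared by all operations;
// the field names follow the diff, the values are placeholders.
type exitInfo struct {
    HighestExitEpoch   uint64
    TotalActiveBalance uint64
}

type balanceCache map[string]uint64

func (c balanceCache) prime(key string, total uint64) { c[key] = total }

func processBlock(cache balanceCache, stateKey string) *exitInfo {
    info := &exitInfo{HighestExitEpoch: 5, TotalActiveBalance: 3_200_000_000_000}
    // Priming the cache up front means later helpers that need the total
    // active balance (e.g. churn limits) avoid an O(n) registry scan.
    cache.prime(stateKey, info.TotalActiveBalance)

    processProposerSlashings(info)
    processAttesterSlashings(info)
    processVoluntaryExits(info)
    return info
}

// Each stage mutates the shared accumulator, so later stages see the
// exit-queue progress made by earlier ones.
func processProposerSlashings(info *exitInfo) { info.HighestExitEpoch++ }
func processAttesterSlashings(info *exitInfo) { info.HighestExitEpoch++ }
func processVoluntaryExits(info *exitInfo)    { info.HighestExitEpoch++ }

func main() {
    cache := balanceCache{}
    info := processBlock(cache, "state-root")
    fmt.Println(info.HighestExitEpoch, cache["state-root"]) // 8 3200000000000
}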
@@ -147,9 +147,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
    if isFullExitRequest {
        // Only exit validator if it has no pending withdrawals in the queue
        if pendingBalanceToWithdraw == 0 {
-           maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
            var err error
-           st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
+           st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
            if err != nil {
                return nil, err
            }
@@ -99,8 +99,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
    for _, idx := range eligibleForEjection {
        // Here is fine to do a quadratic loop since this should
        // barely happen
-       maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
-       st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
+       st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
        if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
            return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
        }
@@ -16,10 +16,10 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
    if err := electra.ProcessEpoch(ctx, state); err != nil {
        return errors.Wrap(err, "could not process epoch in fulu transition")
    }
-   return processProposerLookahead(ctx, state)
+   return ProcessProposerLookahead(ctx, state)
}

-func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
+func ProcessProposerLookahead(ctx context.Context, state state.BeaconState) error {
    _, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
    defer span.End()
@@ -10,6 +10,7 @@ go_library(
    "legacy.go",
    "metrics.go",
    "randao.go",
+   "ranges.go",
    "rewards_penalties.go",
    "shuffle.go",
    "sync_committee.go",
@@ -56,6 +57,7 @@ go_test(
    "private_access_fuzz_noop_test.go",  # keep
    "private_access_test.go",
    "randao_test.go",
+   "ranges_test.go",
    "rewards_penalties_test.go",
    "shuffle_test.go",
    "sync_committee_test.go",
@@ -317,23 +317,15 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
    }

    proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)

-   originalStateSlot := state.Slot()
-
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        // Skip proposer assignment for genesis slot.
        if slot == 0 {
            continue
        }
-       // Set the state's current slot.
-       if err := state.SetSlot(slot); err != nil {
-           return nil, err
-       }
-
        // Determine the proposer index for the current slot.
-       i, err := BeaconProposerIndex(ctx, state)
+       i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
        if err != nil {
-           return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
+           return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
        }

        // Append the slot to the proposer's assignments.
@@ -342,12 +334,6 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
        }
        proposerAssignments[i] = append(proposerAssignments[i], slot)
    }

-   // Reset state back to its original slot.
-   if err := state.SetSlot(originalStateSlot); err != nil {
-       return nil, err
-   }

    return proposerAssignments, nil
}
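The old loop mutated the state's slot with SetSlot and restored it at the end; the slot-parameterised BeaconProposerIndexAtSlot query removes that hazard, since an early error return can no longer leave the state pinned at a temporary slot. A small demonstration of the difference, using a toy state and hypothetical helpers:

package main

import (
    "errors"
    "fmt"
)

type state struct{ slot uint64 }

// Mutate-and-restore: if the query fails mid-loop, the early return skips
// the restore and the caller sees a state pinned at the wrong slot.
func assignmentsMutating(st *state, slots []uint64) (map[uint64]uint64, error) {
    orig := st.slot
    out := map[uint64]uint64{}
    for _, s := range slots {
        st.slot = s
        idx, err := queryCurrent(st)
        if err != nil {
            return nil, err // bug: st.slot is never restored
        }
        out[s] = idx
    }
    st.slot = orig
    return out, nil
}

// Slot-parameterised query: no mutation, nothing to restore.
func assignmentsPure(st *state, slots []uint64) (map[uint64]uint64, error) {
    out := map[uint64]uint64{}
    for _, s := range slots {
        idx, err := queryAt(st, s)
        if err != nil {
            return nil, err
        }
        out[s] = idx
    }
    return out, nil
}

func queryCurrent(st *state) (uint64, error) { return queryAt(st, st.slot) }

func queryAt(_ *state, slot uint64) (uint64, error) {
    if slot == 3 {
        return 0, errors.New("no proposer for slot 3")
    }
    return slot % 4, nil
}

func main() {
    st := &state{slot: 10}
    _, _ = assignmentsMutating(st, []uint64{1, 2, 3})
    fmt.Println(st.slot) // 3, not 10: the temporary slot leaked
    st2 := &state{slot: 10}
    _, _ = assignmentsPure(st2, []uint64{1, 2, 3})
    fmt.Println(st2.slot) // 10
}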
beacon-chain/core/helpers/ranges.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package helpers

import (
    "fmt"
    "slices"
)

// SortedSliceFromMap takes a map with uint64 keys and returns a sorted slice of the keys.
func SortedSliceFromMap(toSort map[uint64]bool) []uint64 {
    slice := make([]uint64, 0, len(toSort))
    for key := range toSort {
        slice = append(slice, key)
    }

    slices.Sort(slice)
    return slice
}

// PrettySlice returns a pretty string representation of a sorted slice of uint64.
// `sortedSlice` must be sorted in ascending order.
// Example: [1,2,3,5,6,7,8,10] -> "1-3,5-8,10"
func PrettySlice(sortedSlice []uint64) string {
    if len(sortedSlice) == 0 {
        return ""
    }

    var result string
    start := sortedSlice[0]
    end := sortedSlice[0]

    for i := 1; i < len(sortedSlice); i++ {
        if sortedSlice[i] == end+1 {
            end = sortedSlice[i]
            continue
        }

        if start == end {
            result += fmt.Sprintf("%d,", start)
            start = sortedSlice[i]
            end = sortedSlice[i]
            continue
        }

        result += fmt.Sprintf("%d-%d,", start, end)
        start = sortedSlice[i]
        end = sortedSlice[i]
    }

    if start == end {
        result += fmt.Sprintf("%d", start)
        return result
    }

    result += fmt.Sprintf("%d-%d", start, end)
    return result
}

// SortedPrettySliceFromMap combines SortedSliceFromMap and PrettySlice to return a pretty string representation of the keys in a map.
func SortedPrettySliceFromMap(toSort map[uint64]bool) string {
    sorted := SortedSliceFromMap(toSort)
    return PrettySlice(sorted)
}
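A usage sketch for the new helpers, assuming the import path shown in this diff; the "missing indices" framing is only an illustrative use case, not taken from the source:

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
)

func main() {
    // A set of missing indices, as a map acting as a set.
    missing := map[uint64]bool{12: true, 13: true, 14: true, 5: true, 7: true, 8: true}

    // Consecutive runs collapse into ranges: "5,7-8,12-14".
    fmt.Println(helpers.SortedPrettySliceFromMap(missing))

    // PrettySlice requires an already-sorted input.
    fmt.Println(helpers.PrettySlice([]uint64{1, 2, 3, 5, 6, 7, 8, 10})) // "1-3,5-8,10"
}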
beacon-chain/core/helpers/ranges_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package helpers_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestSortedSliceFromMap(t *testing.T) {
    input := map[uint64]bool{5: true, 3: true, 8: true, 1: true}
    expected := []uint64{1, 3, 5, 8}

    actual := helpers.SortedSliceFromMap(input)
    require.Equal(t, len(expected), len(actual))

    for i := range expected {
        require.Equal(t, expected[i], actual[i])
    }
}

func TestPrettySlice(t *testing.T) {
    tests := []struct {
        name     string
        input    []uint64
        expected string
    }{
        {
            name:     "empty slice",
            input:    []uint64{},
            expected: "",
        },
        {
            name:     "only distinct elements",
            input:    []uint64{1, 3, 5, 7, 9},
            expected: "1,3,5,7,9",
        },
        {
            name:     "single range",
            input:    []uint64{1, 2, 3, 4, 5},
            expected: "1-5",
        },
        {
            name:     "multiple ranges and distinct elements",
            input:    []uint64{1, 2, 3, 5, 6, 7, 8, 10, 12, 13, 14},
            expected: "1-3,5-8,10,12-14",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            actual := helpers.PrettySlice(tt.input)
            require.Equal(t, tt.expected, actual)
        })
    }
}

func TestSortedPrettySliceFromMap(t *testing.T) {
    input := map[uint64]bool{5: true, 7: true, 8: true, 10: true}
    expected := "5,7-8,10"

    actual := helpers.SortedPrettySliceFromMap(input)
    require.Equal(t, expected, actual)
}
@@ -87,6 +87,11 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
    return total, nil
}

// UpdateTotalActiveBalanceCache updates the cache with the given total active balance.
func UpdateTotalActiveBalanceCache(s state.BeaconState, total uint64) error {
    return balanceCache.AddTotalEffectiveBalance(s, total)
}

// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:
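UpdateTotalActiveBalanceCache exists so a caller that has already summed the registry (such as ExitInformation in the electra path above) can seed the cache and spare later TotalActiveBalance calls the O(n) scan. The seedable-cache shape in miniature, with toy types:

package main

import "fmt"

// totalCache is a toy stand-in for the balance cache: expensive to fill
// by scanning, cheap to seed when a caller already holds the value.
type totalCache struct{ byEpoch map[uint64]uint64 }

func (c *totalCache) seed(epoch, total uint64) { c.byEpoch[epoch] = total }

func totalActiveBalance(c *totalCache, epoch uint64, balances []uint64) uint64 {
    if v, ok := c.byEpoch[epoch]; ok {
        return v // cache hit: no scan needed
    }
    var total uint64
    for _, b := range balances {
        total += b
    }
    c.seed(epoch, total)
    return total
}

func main() {
    c := &totalCache{byEpoch: map[uint64]uint64{}}
    balances := []uint64{32, 32, 31}
    c.seed(0, 95) // a caller that already summed the registry seeds the cache
    fmt.Println(totalActiveBalance(c, 0, balances)) // 95, served from the cache
}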
@@ -297,3 +297,30 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
        require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
    }
}

func TestUpdateTotalActiveBalanceCache(t *testing.T) {
    helpers.ClearCache()

    // Create a test state with some validators
    validators := []*ethpb.Validator{
        {EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
        {EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
        {EffectiveBalance: 31 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
    }
    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Validators: validators,
        Slot:       0,
    })
    require.NoError(t, err)

    // Test updating cache with a specific total
    testTotal := uint64(95 * 1e9) // 32 + 32 + 31 = 95
    err = helpers.UpdateTotalActiveBalanceCache(state, testTotal)
    require.NoError(t, err)

    // Verify the cache was updated by retrieving the total active balance
    // which should now return the cached value
    cachedTotal, err := helpers.TotalActiveBalance(state)
    require.NoError(t, err)
    assert.Equal(t, testTotal, cachedTotal, "Cache should return the updated total")
}
@@ -21,6 +21,39 @@ var (
    syncCommitteeCache = cache.NewSyncCommittee()
)

// CurrentPeriodPositions returns committee indices of the current period sync committee for input validators.
func CurrentPeriodPositions(st state.BeaconState, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }
    pos, err := syncCommitteeCache.CurrentPeriodPositions(root, indices)
    if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
        committee, err := st.CurrentSyncCommittee()
        if err != nil {
            return nil, err
        }

        // Fill in the cache on miss.
        go func() {
            if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
                log.WithError(err).Error("Could not fill sync committee cache on miss")
            }
        }()

        pos = make([][]primitives.CommitteeIndex, len(indices))
        for i, idx := range indices {
            pubkey := st.PubkeyAtIndex(idx)
            pos[i] = findSubCommitteeIndices(pubkey[:], committee.Pubkeys)
        }
        return pos, nil
    }
    if err != nil {
        return nil, err
    }
    return pos, nil
}

// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache
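CurrentPeriodPositions is a read-through cache with asynchronous backfill: a miss is answered synchronously from the state's sync committee while a goroutine repopulates the cache for subsequent callers. The same shape reduced to a toy example (note the real backfill recomputes from the state rather than storing the answered value):

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

var errMiss = errors.New("no cache entry")

type cache struct {
    mu   sync.RWMutex
    data map[string][]int
}

func (c *cache) get(key string) ([]int, error) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    if v, ok := c.data[key]; ok {
        return v, nil
    }
    return nil, errMiss
}

func (c *cache) fill(key string, v []int) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.data[key] = v
}

// positions answers a miss from the source of truth right away and kicks
// the cache write into the background so the request is never blocked.
func positions(c *cache, key string, compute func() []int) []int {
    if v, err := c.get(key); err == nil {
        return v
    }
    v := compute()
    go c.fill(key, v) // backfill for the next caller
    return v
}

func main() {
    c := &cache{data: map[string][]int{}}
    fmt.Println(positions(c, "root", func() []int { return []int{0, 1} })) // computed
    time.Sleep(10 * time.Millisecond)                                      // let the backfill land
    fmt.Println(positions(c, "root", func() []int { return nil }))        // served from cache
}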
@@ -17,6 +17,38 @@ import (
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestCurrentPeriodPositions(t *testing.T) {
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
        Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
    }
    for i := 0; i < len(validators); i++ {
        k := make([]byte, 48)
        copy(k, strconv.Itoa(i))
        validators[i] = &ethpb.Validator{
            PublicKey: k,
        }
        syncCommittee.Pubkeys[i] = bytesutil.PadTo(k, 48)
    }
    state, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
        Validators: validators,
    })
    require.NoError(t, err)
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee([32]byte{}, state))

    positions, err := helpers.CurrentPeriodPositions(state, []primitives.ValidatorIndex{0, 1})
    require.NoError(t, err)
    require.Equal(t, 2, len(positions))
    require.Equal(t, 1, len(positions[0]))
    assert.Equal(t, primitives.CommitteeIndex(0), positions[0][0])
    require.Equal(t, 1, len(positions[1]))
    assert.Equal(t, primitives.CommitteeIndex(1), positions[1][0])
}

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    helpers.ClearCache()
@@ -309,23 +309,29 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
    if err != nil {
        return 0, errors.Wrap(err, "could not get proposer lookahead")
    }
    spe := params.BeaconConfig().SlotsPerEpoch
    if e == stateEpoch {
-       return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
+       return lookAhead[slot%spe], nil
    }
    // The caller is requesting the proposer for the next epoch
-   return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
+   return lookAhead[spe+slot%spe], nil
}

// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
-   if state.Version() >= version.Fulu {
-       return beaconProposerIndexAtSlotFulu(state, slot)
-   }
    e := slots.ToEpoch(slot)
    stateEpoch := slots.ToEpoch(state.Slot())
    // Even if the state is post Fulu, we may request a past proposer index.
    if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
        // We can use the cached lookahead only for the current and the next epoch.
        if e == stateEpoch || e == stateEpoch+1 {
            return beaconProposerIndexAtSlotFulu(state, slot)
        }
    }
    // The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
-   // For simplicity, the node will skip caching of genesis epoch.
-   if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
+   // For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
+   if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
        s, err := slots.EpochEnd(e - 1)
        if err != nil {
            return 0, err
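The fulu lookahead stores proposer indices for exactly two epochs back to back, which is why the wrapper above only takes this path when the requested epoch is the state's epoch or the one after: index slot % SLOTS_PER_EPOCH for the current epoch and SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH for the next. A worked, self-contained version of the indexing follows (toy signature; the constants mirror the values used in TestBeaconProposerIndexAtSlotFulu below):

package main

import "fmt"

const slotsPerEpoch = 32

// lookAhead holds proposer indices for the current and the next epoch,
// back to back: [0, 32) current, [32, 64) next.
func proposerFromLookahead(lookAhead []uint64, stateEpoch, slot uint64) (uint64, error) {
    e := slot / slotsPerEpoch
    switch e {
    case stateEpoch:
        return lookAhead[slot%slotsPerEpoch], nil
    case stateEpoch + 1:
        return lookAhead[slotsPerEpoch+slot%slotsPerEpoch], nil
    default:
        return 0, fmt.Errorf("slot %d is not in the current epoch %d or the next epoch", slot, stateEpoch)
    }
}

func main() {
    lookAhead := make([]uint64, 2*slotsPerEpoch)
    lookAhead[2] = 15  // proposer for slot 98 when stateEpoch == 3 (98 % 32 == 2)
    lookAhead[34] = 42 // proposer for slot 130 in the next epoch (32 + 130%32 == 34)

    fmt.Println(proposerFromLookahead(lookAhead, 3, 98))  // 15 <nil>
    fmt.Println(proposerFromLookahead(lookAhead, 3, 130)) // 42 <nil>
    fmt.Println(proposerFromLookahead(lookAhead, 3, 95))  // error: outside the two-epoch window
}

Slots outside the window no longer error in the exported wrapper; they fall back to the regular seed-based computation, which is why the diff drops the out-of-window assertions from the test.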
@@ -1161,6 +1161,10 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}

func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.FuluForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    lookahead := make([]uint64, 64)
    lookahead[0] = 15
    lookahead[1] = 16
@@ -1180,8 +1184,4 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
    idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
    require.NoError(t, err)
    require.Equal(t, primitives.ValidatorIndex(42), idx)
-   _, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
-   require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
-   _, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
-   require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}
@@ -1,202 +0,0 @@
package light_client

import (
    "context"
    "sync"

    "github.com/OffchainLabs/prysm/v6/async/event"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")

type Store struct {
    mu sync.RWMutex

    beaconDB             iface.HeadAccessDatabase
    lastFinalityUpdate   interfaces.LightClientFinalityUpdate   // tracks the best finality update seen so far
    lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
    p2p                  p2p.Accessor
    stateFeed            event.SubscriberSender
}

func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
    return &Store{
        beaconDB:  db,
        p2p:       p,
        stateFeed: e,
    }
}

func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Fetch the light client bootstrap from the database
    bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
    if err != nil {
        return nil, err
    }
    if bootstrap == nil { // not found
        return nil, ErrLightClientBootstrapNotFound
    }

    return bootstrap, nil
}

func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    blk, err := s.beaconDB.Block(ctx, blockRoot)
    if err != nil {
        return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
    }
    if blk == nil {
        return errors.Errorf("failed to fetch block for root %x", blockRoot)
    }

    state, err := s.beaconDB.State(ctx, blockRoot)
    if err != nil {
        return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
    }
    if state == nil {
        return errors.Errorf("failed to fetch state for block root %x", blockRoot)
    }

    bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
    if err != nil {
        return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
    }

    // Save the light client bootstrap to the database
    if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
        return err
    }
    return nil
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Fetch the light client updatesMap from the database
    updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
    if err != nil {
        return nil, err
    }

    var updates []interfaces.LightClientUpdate
    for i := startPeriod; i <= endPeriod; i++ {
        update, ok := updatesMap[i]
        if !ok {
            // Only return the first contiguous range of updates
            break
        }
        updates = append(updates, update)
    }

    return updates, nil
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Fetch the light client update for the given period from the database
    update, err := s.beaconDB.LightClientUpdate(ctx, period)
    if err != nil {
        return nil, err
    }

    return update, nil
}

func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
    if err != nil {
        return errors.Wrapf(err, "could not get current light client update")
    }

    if oldUpdate == nil {
        if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
            return errors.Wrapf(err, "could not save light client update")
        }
        log.WithField("period", period).Debug("Saved new light client update")
        return nil
    }

    isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
    if err != nil {
        return errors.Wrapf(err, "could not compare light client updates")
    }

    if isNewUpdateBetter {
        if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
            return errors.Wrapf(err, "could not save light client update")
        }
        log.WithField("period", period).Debug("Saved new light client update")
        return nil
    }
    log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
    return nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
    s.mu.Lock()
    defer s.mu.Unlock()

    if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
        if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
            log.WithError(err).Error("Could not broadcast light client finality update")
        }
    }

    s.lastFinalityUpdate = update
    log.Debug("Saved new light client finality update")

    s.stateFeed.Send(&feed.Event{
        Type: statefeed.LightClientFinalityUpdate,
        Data: update,
    })
}

func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
    s.mu.Lock()
    defer s.mu.Unlock()

    if broadcast {
        if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
            log.WithError(err).Error("Could not broadcast light client optimistic update")
        }
    }

    s.lastOptimisticUpdate = update
    log.Debug("Saved new light client optimistic update")

    s.stateFeed.Send(&feed.Event{
        Type: statefeed.LightClientOptimisticUpdate,
        Data: update,
    })
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.lastOptimisticUpdate
}
@@ -1,165 +0,0 @@
|
||||
package light_client_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestLightClientStore(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Initialize the light client store
|
||||
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))
|
||||
|
||||
// Create test light client updates for Capella and Deneb
|
||||
lCapella := util.NewTestLightClient(t, version.Capella)
|
||||
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
|
||||
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")
|
||||
|
||||
lDeneb := util.NewTestLightClient(t, version.Deneb)
|
||||
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
|
||||
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")
|
||||
|
||||
// Initially the store should have nil values for both updates
|
||||
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get finality with Capella update. Optimistic update should be nil
|
||||
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get optimistic with Capella update. Finality update should be Capella
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
|
||||
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
|
||||
// Set and get finality and optimistic with Deneb update
|
||||
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
|
||||
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
	require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
}

func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
	p2p := p2pTesting.NewTestP2P(t)
	lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))

	// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
	l0 := util.NewTestLightClient(t, version.Altair)
	update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
	// update0 should be valid for broadcast - meaning it should be broadcast
	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update0, true)
	require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
	p2p.BroadcastCalled.Store(false) // Reset for next test

	// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
	l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
	update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
	// update1 should not be valid for broadcast - meaning it should not be broadcast
	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update1, true)
	require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
	p2p.BroadcastCalled.Store(false) // Reset for next test

	// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
	l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
	update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
	// update2 should be valid for broadcast - meaning it should be broadcast
	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update2, true)
	require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
	p2p.BroadcastCalled.Store(false) // Reset for next test

	// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
	l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
	update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
	// update3 should not be valid for broadcast - meaning it should not be broadcast
	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update3, true)
	require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")

	// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
	l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
	update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
	// update4 should be valid for broadcast - meaning it should be broadcast
	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update4, true)
	require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
	p2p.BroadcastCalled.Store(false) // Reset for next test

	// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
	l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
	update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
	// update5 should not be valid for broadcast - meaning it should not be broadcast
	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update5, true)
	require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

	// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
	l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
	update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
	require.NoError(t, err, "Failed to create light client finality update")

	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
	// update6 should not be valid for broadcast - meaning it should not be broadcast
	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")

	lcStore.SetLastFinalityUpdate(update6, true)
	require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}
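
// The assertions above pin down a simple gating rule for rebroadcasting
// finality updates. shouldBroadcast below is a minimal sketch of that rule,
// assuming it is exactly what the test encodes; the type and names here are
// illustrative, not the package API.
type finalityUpdate struct {
	FinalizedSlot    uint64
	HasSupermajority bool
}

func shouldBroadcast(prev, next *finalityUpdate) bool {
	if prev == nil {
		return true // the first update ever stored is always broadcast
	}
	if next.FinalizedSlot > prev.FinalizedSlot {
		return true // first update observed for a newly finalized slot
	}
	// Same finalized slot: only the first update reaching supermajority
	// participation is rebroadcast; later improvements are stored silently.
	return next.HasSupermajority && !prev.HasSupermajority
}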
@@ -19,11 +19,11 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -4,15 +4,10 @@ import (
	"encoding/binary"
	"math"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/crypto/hash"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
@@ -20,12 +15,9 @@ import (

var (
	// Custom errors
	ErrCustodyGroupTooLarge = errors.New("custody group too large")
	ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
	ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
	ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
	ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
	errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
	ErrCustodyGroupTooLarge = errors.New("custody group too large")
	ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
	errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")

	// maxUint256 is the maximum value of a uint256.
	maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
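	// Note (added for clarity): uint256.Int in github.com/holiman/uint256 is a
	// little-endian array of four 64-bit limbs, so filling every limb with
	// math.MaxUint64 yields 2^256 - 1, the largest representable value.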
@@ -117,44 +109,6 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
	return columns, nil
}

// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
	if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
		return nil, nil
	}

	block := signedBlock.Block()
	blockBody := block.Body()
	blobKzgCommitments, err := blockBody.BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	if len(blobKzgCommitments) != len(cellsAndProofs) {
		return nil, ErrSizeMismatch
	}

	signedBlockHeader, err := signedBlock.Header()
	if err != nil {
		return nil, errors.Wrap(err, "signed block header")
	}

	kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
	if err != nil {
		return nil, errors.Wrap(err, "merkle proof KZG commitments")
	}

	dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars")
	}

	return dataColumnSidecars, nil
}

// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
@@ -194,72 +148,3 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {

	return columns, nil
}

// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs, and the cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters the input parameters afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
func dataColumnsSidecars(
	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
	blobKzgCommitments [][]byte,
	kzgCommitmentsInclusionProof [][]byte,
	cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
	start := time.Now()
	if len(blobKzgCommitments) != len(cellsAndProofs) {
		return nil, ErrSizeMismatch
	}

	numberOfColumns := params.BeaconConfig().NumberOfColumns

	blobsCount := len(cellsAndProofs)
	sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
	for columnIndex := range numberOfColumns {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := range blobsCount {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			// Validate that we have enough cells and proofs for this column index
			if columnIndex >= uint64(len(cellsForRow)) {
				return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
			}
			if columnIndex >= uint64(len(proofsForRow)) {
				return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
			}

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			Index:                        columnIndex,
			Column:                       columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProofs:                    kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}

	dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
	return sidecars, nil
}

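// Shape note (added for clarity): cellsAndProofs is row-major, i.e.
// cellsAndProofs[blob].Cells[column]. The loops above transpose it, so the
// sidecar for a given columnIndex carries one cell per blob:
// Column = {cellsAndProofs[0].Cells[columnIndex], cellsAndProofs[1].Cells[columnIndex], ...}.
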
@@ -3,13 +3,9 @@ package peerdas_test
import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

@@ -31,93 +27,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
	require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
}

func TestDataColumnSidecars(t *testing.T) {
	t.Run("nil signed block", func(t *testing.T) {
		var expected []*ethpb.DataColumnSidecar = nil
		actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
		require.NoError(t, err)

		require.DeepSSZEqual(t, expected, actual)
	})

	t.Run("empty cells and proofs", func(t *testing.T) {
		// Create a protobuf signed beacon block.
		signedBeaconBlockPb := util.NewBeaconBlockDeneb()

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
		require.NoError(t, err)
		require.IsNil(t, actual)
	})

	t.Run("sizes mismatch", func(t *testing.T) {
		// Create a protobuf signed beacon block.
		signedBeaconBlockPb := util.NewBeaconBlockDeneb()

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs.
		cellsAndProofs := make([]kzg.CellsAndProofs, 1)

		_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
		require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
	})

	t.Run("cells array too short for column index", func(t *testing.T) {
		// Create a Fulu block with a blob commitment.
		signedBeaconBlockPb := util.NewBeaconBlockFulu()
		signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs with insufficient cells for the number of columns.
		// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
		cellsAndProofs := []kzg.CellsAndProofs{
			{
				Cells:  make([]kzg.Cell, 10),  // Only 10 cells
				Proofs: make([]kzg.Proof, 10), // Only 10 proofs
			},
		}

		// This should fail because the function will try to access columns up to NumberOfColumns
		// but we only have 10 cells/proofs.
		_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
		require.ErrorContains(t, "column index", err)
		require.ErrorContains(t, "exceeds cells length", err)
	})

	t.Run("proofs array too short for column index", func(t *testing.T) {
		// Create a Fulu block with a blob commitment.
		signedBeaconBlockPb := util.NewBeaconBlockFulu()
		signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs with sufficient cells but insufficient proofs.
		numberOfColumns := params.BeaconConfig().NumberOfColumns
		cellsAndProofs := []kzg.CellsAndProofs{
			{
				Cells:  make([]kzg.Cell, numberOfColumns),
				Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
			},
		}

		// This should fail when trying to access proof beyond index 4.
		_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
		require.ErrorContains(t, "column index", err)
		require.ErrorContains(t, "exceeds proofs length", err)
	})
}

func TestComputeCustodyGroupForColumn(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig()

@@ -63,17 +63,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
	t.Run("invalid proof", func(t *testing.T) {
		sidecars := generateRandomSidecars(t, seed, blobCount)
		sidecars[0].Column[0][0]++ // It is OK to overflow
		roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

		err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
		require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
	})

	t.Run("nominal", func(t *testing.T) {
		sidecars := generateRandomSidecars(t, seed, blobCount)
		roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

		err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
		require.NoError(t, err)
	})
}
@@ -256,9 +253,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
	for i := range int64(b.N) {
		// Generate new random sidecars to ensure the KZG backend does not cache anything.
		sidecars := generateRandomSidecars(b, i, blobCount)
		roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)

		for _, sidecar := range roDataColumnSidecars {
		for _, sidecar := range sidecars {
			sidecars := []blocks.RODataColumn{sidecar}
			b.StartTimer()
			err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
@@ -282,7 +278,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
	b.ResetTimer()

	for j := range int64(b.N) {
		allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
		allSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
		for k := int64(0); k < numberOfColumns; k += columnsCount {
			// Use different seeds to generate different blobs/commitments
			seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
@@ -292,10 +288,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
			allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
		}

		roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)

		b.StartTimer()
		err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
		err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
		b.StopTimer()
		require.NoError(b, err)
	}
@@ -323,8 +317,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
	for j := range int64(batchCount) {
		// Use different seeds to generate different blobs/commitments
		sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
		roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
		allSidecars = append(allSidecars, roDataColumnSidecars)
		allSidecars = append(allSidecars, sidecars)
	}

	for _, sidecars := range allSidecars {
@@ -358,7 +351,7 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
	return roSidecar
}

func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.RODataColumn {
	dbBlock := util.NewBeaconBlockDeneb()

	commitments := make([][]byte, 0, blobCount)
@@ -379,20 +372,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataCo
	require.NoError(t, err)

	cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
	sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
	rob, err := blocks.NewROBlock(sBlock)
	require.NoError(t, err)
	sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
	require.NoError(t, err)

	return sidecars
}

func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
	roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
	for _, sidecar := range sidecars {
		roCol, err := blocks.NewRODataColumn(sidecar)
		require.NoError(t, err)

		roDataColumnSidecars = append(roDataColumnSidecars, roCol)
	}

	return roDataColumnSidecars
}

@@ -5,7 +5,7 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
@@ -16,6 +16,7 @@ var (
	ErrBlobIndexTooHigh = errors.New("blob index is too high")
	ErrBlockRootMismatch = errors.New("block root mismatch")
	ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
	ErrNilBlobAndProof = errors.New("nil blob and proof")
)

// MinimumColumnCountToReconstruct returns the minimum number of columns needed to proceed with a reconstruction.
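// Context note (assumption; the function body is elided from this diff): with
// the one-dimensional Reed-Solomon extension used by PeerDAS, any
// NumberOfColumns/2 distinct columns suffice to recover the remainder, which
// is the bound this function encodes.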
@@ -28,19 +29,19 @@ func MinimumColumnCountToReconstruct() uint64 {
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	// Check if there is at least one input sidecar.
	if len(inVerifiedRoSidecars) == 0 {
	if len(verifiedRoSidecars) == 0 {
		return nil, ErrNotEnoughDataColumnSidecars
	}

	// Safely retrieve the first sidecar as a reference.
	referenceSidecar := inVerifiedRoSidecars[0]
	referenceSidecar := verifiedRoSidecars[0]

	// Check if all columns have the same length and are committed to the same block.
	blobCount := len(referenceSidecar.Column)
	blockRoot := referenceSidecar.BlockRoot()
	for _, sidecar := range inVerifiedRoSidecars[1:] {
	for _, sidecar := range verifiedRoSidecars[1:] {
		if len(sidecar.Column) != blobCount {
			return nil, ErrColumnLengthsDiffer
		}
@@ -51,8 +52,8 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
	}

	// Deduplicate sidecars.
	sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(inVerifiedRoSidecars))
	for _, inVerifiedRoSidecar := range inVerifiedRoSidecars {
	sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
	for _, inVerifiedRoSidecar := range verifiedRoSidecars {
		sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
	}

@@ -62,12 +63,6 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
		return nil, ErrNotEnoughDataColumnSidecars
	}

	// Sidecars are verified and are committed to the same block.
	// All signed block headers, KZG commitments, and inclusion proofs are the same.
	signedBlockHeader := referenceSidecar.SignedBlockHeader
	kzgCommitments := referenceSidecar.KzgCommitments
	kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof

	// Recover cells and compute proofs in parallel.
	var wg errgroup.Group
	cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -100,78 +95,20 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
		return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
	}

	outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
	outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars from items")
	}

	// Input sidecars are verified, and we reconstructed the missing sidecars ourselves.
	// As a consequence, reconstructed sidecars are also verified.
	outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
	reconstructedVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
	for _, sidecar := range outSidecars {
		roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
		if err != nil {
			return nil, errors.Wrap(err, "new RO data column with root")
		}

		verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
		outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
		verifiedRoSidecar := blocks.NewVerifiedRODataColumn(sidecar)
		reconstructedVerifiedRoSidecars = append(reconstructedVerifiedRoSidecars, verifiedRoSidecar)
	}

	return outVerifiedRoSidecars, nil
}

// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
// construct data column sidecars from data obtained from the execution client via:
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
// cell proofs are "flattened".
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
	// Check if the cells count is equal to the cell proofs count.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	blobCount := uint64(len(blobs))
	cellProofsCount := uint64(len(cellProofs))

	cellsCount := blobCount * numberOfColumns
	if cellsCount != cellProofsCount {
		return nil, ErrBlobsCellsProofsMismatch
	}

	cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
	for i, blob := range blobs {
		var kzgBlob kzg.Blob
		if copy(kzgBlob[:], blob) != len(kzgBlob) {
			return nil, errors.New("wrong blob size - should never happen")
		}

		// Compute the extended cells from the (non-extended) blob.
		cells, err := kzg.ComputeCells(&kzgBlob)
		if err != nil {
			return nil, errors.Wrap(err, "compute cells")
		}

		var proofs []kzg.Proof
		for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
			var kzgProof kzg.Proof
			if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
				return nil, errors.New("wrong KZG proof size - should never happen")
			}

			proofs = append(proofs, kzgProof)
		}

		cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
		cellsAndProofs = append(cellsAndProofs, cellsProofs)
	}

	dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars")
	}

	return dataColumnSidecars, nil
	return reconstructedVerifiedRoSidecars, nil
}

// ReconstructBlobs reconstructs verified read-only blob sidecars from verified read-only data column sidecars.
@@ -256,6 +193,89 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
	return blobSidecars, nil
}

// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and flattened cell proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	blobCount := uint64(len(blobs))
	cellProofsCount := uint64(len(cellProofs))

	cellsCount := blobCount * numberOfColumns
	if cellsCount != cellProofsCount {
		return nil, ErrBlobsCellsProofsMismatch
	}

	cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
	for i, blob := range blobs {
		var kzgBlob kzg.Blob
		if copy(kzgBlob[:], blob) != len(kzgBlob) {
			return nil, errors.New("wrong blob size - should never happen")
		}

		// Compute the extended cells from the (non-extended) blob.
		cells, err := kzg.ComputeCells(&kzgBlob)
		if err != nil {
			return nil, errors.Wrap(err, "compute cells")
		}

		var proofs []kzg.Proof
		for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
			var kzgProof kzg.Proof
			if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
				return nil, errors.New("wrong KZG proof size - should never happen")
			}

			proofs = append(proofs, kzgProof)
		}

		cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
		cellsAndProofs = append(cellsAndProofs, cellsProofs)
	}

	return cellsAndProofs, nil
}
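
// Layout sketch (added for illustration): the flattened slice consumed by
// ComputeCellsAndProofsFromFlat groups proofs per blob, one per column, so the
// proof for blob i and column j sits at index i*numberOfColumns + j. The
// helper below is hypothetical, not part of the package.
func flatProofIndex(i, j, numberOfColumns uint64) uint64 {
	return i*numberOfColumns + j
}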

// ComputeCellsAndProofsFromStructured computes the cells and proofs from structured blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs))
	for _, blobAndProof := range blobsAndProofs {
		if blobAndProof == nil {
			return nil, ErrNilBlobAndProof
		}

		var kzgBlob kzg.Blob
		if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
			return nil, errors.New("wrong blob size - should never happen")
		}

		// Compute the extended cells from the (non-extended) blob.
		cells, err := kzg.ComputeCells(&kzgBlob)
		if err != nil {
			return nil, errors.Wrap(err, "compute cells")
		}

		kzgProofs := make([]kzg.Proof, 0, numberOfColumns*kzg.BytesPerProof)
		for _, kzgProofBytes := range blobAndProof.KzgProofs {
			if len(kzgProofBytes) != kzg.BytesPerProof {
				return nil, errors.New("wrong KZG proof size - should never happen")
			}

			var kzgProof kzg.Proof
			if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
				return nil, errors.New("wrong copied KZG proof size - should never happen")
			}

			kzgProofs = append(kzgProofs, kzgProof)
		}

		cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs}
		cellsAndProofs = append(cellsAndProofs, cellsProofs)
	}

	return cellsAndProofs, nil
}

// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
	referenceSidecar := dataColumnSidecars[0]

@@ -9,7 +9,7 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/pkg/errors"
@@ -124,50 +124,6 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
	})
}

func TestConstructDataColumnSidecars(t *testing.T) {
	const (
		blobCount    = 3
		cellsPerBlob = fieldparams.CellsPerBlob
	)

	numberOfColumns := params.BeaconConfig().NumberOfColumns

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)

	// Extract blobs and proofs from the sidecars.
	blobs := make([][]byte, 0, blobCount)
	cellProofs := make([][]byte, 0, cellsPerBlob)
	for blobIndex := range blobCount {
		blob := make([]byte, 0, cellsPerBlob)
		for columnIndex := range cellsPerBlob {
			cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
			blob = append(blob, cell...)
		}

		blobs = append(blobs, blob)

		for columnIndex := range numberOfColumns {
			cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
			cellProofs = append(cellProofs, cellProof)
		}
	}

	actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
	require.NoError(t, err)

	// Extract the base verified ro sidecars into sidecars.
	expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
	for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
		expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
	}

	require.DeepSSZEqual(t, expected, actual)
}

func TestReconstructBlobs(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
@@ -250,7 +206,7 @@ func TestReconstructBlobs(t *testing.T) {
	// Compute cells and proofs from blob sidecars.
	var wg errgroup.Group
	blobs := make([][]byte, blobCount)
	cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
	inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
	for i := range blobCount {
		blob := roBlobSidecars[i].Blob
		blobs[i] = blob
@@ -267,7 +223,7 @@ func TestReconstructBlobs(t *testing.T) {

		// It is safe for multiple goroutines to concurrently write to the same slice,
		// as long as they are writing to different indices, which is the case here.
		cellsAndProofs[i] = cp
		inputCellsAndProofs[i] = cp

		return nil
	})
@@ -278,25 +234,24 @@ func TestReconstructBlobs(t *testing.T) {

	// Flatten proofs.
	cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
	for _, cp := range cellsAndProofs {
	for _, cp := range inputCellsAndProofs {
		for _, proof := range cp.Proofs {
			cellProofs = append(cellProofs, proof[:])
		}
	}

	// Construct data column sidecars.
	// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
	// `TestConstructDataColumnSidecars` tests pass.
	dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
	// Compute cells and proofs from the blobs and cell proofs.
	cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
	require.NoError(t, err)

	// Construct data column sidecars from the signed block and the cells and proofs.
	roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
	require.NoError(t, err)

	// Convert to verified data column sidecars.
	verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
	for _, dataColumnSidecar := range dataColumnSidecars {
		roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
		require.NoError(t, err)

		verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
	verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
	for _, roDataColumnSidecar := range roDataColumnSidecars {
		verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
		verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
	}

@@ -339,3 +294,162 @@ func TestReconstructBlobs(t *testing.T) {
	})

}

func TestComputeCellsAndProofsFromFlat(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("mismatched blob and proof counts", func(t *testing.T) {
		numberOfColumns := params.BeaconConfig().NumberOfColumns

		// Create one blob but proofs for two blobs
		blobs := [][]byte{{}}

		// Create proofs for 2 blobs worth of columns
		cellProofs := make([][]byte, 2*numberOfColumns)

		_, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
		require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
	})

	t.Run("nominal", func(t *testing.T) {
		const blobCount = 2
		numberOfColumns := params.BeaconConfig().NumberOfColumns

		// Generate test blobs
		_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

		// Extract blobs and compute expected cells and proofs
		blobs := make([][]byte, blobCount)
		expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
		var wg errgroup.Group

		for i := range blobCount {
			blob := roBlobSidecars[i].Blob
			blobs[i] = blob

			wg.Go(func() error {
				var kzgBlob kzg.Blob
				count := copy(kzgBlob[:], blob)
				require.Equal(t, len(kzgBlob), count)

				cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
				if err != nil {
					return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
				}

				expectedCellsAndProofs[i] = cp
				return nil
			})
		}

		err := wg.Wait()
		require.NoError(t, err)

		// Flatten proofs
		cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
		for _, cp := range expectedCellsAndProofs {
			for _, proof := range cp.Proofs {
				cellProofs = append(cellProofs, proof[:])
			}
		}

		// Test ComputeCellsAndProofsFromFlat
		actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
		require.NoError(t, err)
		require.Equal(t, blobCount, len(actualCellsAndProofs))

		// Verify the results match expected
		for i := range blobCount {
			require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
			require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))

			// Compare cells
			for j, expectedCell := range expectedCellsAndProofs[i].Cells {
				require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
			}

			// Compare proofs
			for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
				require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
			}
		}
	})
}

func TestComputeCellsAndProofsFromStructured(t *testing.T) {
	t.Run("nil blob and proof", func(t *testing.T) {
		_, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
		require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
	})

	t.Run("nominal", func(t *testing.T) {
		// Start the trusted setup.
		err := kzg.Start()
		require.NoError(t, err)

		const blobCount = 2

		// Generate test blobs
		_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

		// Extract blobs and compute expected cells and proofs
		blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount)
		expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)

		var wg errgroup.Group
		for i := range blobCount {
			blob := roBlobSidecars[i].Blob

			wg.Go(func() error {
				var kzgBlob kzg.Blob
				count := copy(kzgBlob[:], blob)
				require.Equal(t, len(kzgBlob), count)

				cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
				if err != nil {
					return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
				}
				expectedCellsAndProofs[i] = cellsAndProofs

				kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs))
				for _, proof := range cellsAndProofs.Proofs {
					kzgProofs = append(kzgProofs, proof[:])
				}

				blobAndProof := &pb.BlobAndProofV2{
					Blob:      blob,
					KzgProofs: kzgProofs,
				}
				blobsAndProofs[i] = blobAndProof

				return nil
			})
		}

		err = wg.Wait()
		require.NoError(t, err)

		// Test ComputeCellsAndProofsFromStructured
		actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
		require.NoError(t, err)
		require.Equal(t, blobCount, len(actualCellsAndProofs))

		// Verify the results match expected
		for i := range blobCount {
			require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
			require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))

			// Compare cells
			for j, expectedCell := range expectedCellsAndProofs[i].Cells {
				require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
			}

			// Compare proofs
			for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
				require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
			}
		}
	})
}

@@ -1,12 +1,76 @@
package peerdas

import (
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
)

var (
	ErrNilSignedBlockOrEmptyCellsAndProofs = errors.New("nil signed block or empty cells and proofs")
	ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
	ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
	ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
)

var (
	_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
	_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
)

const (
	BlockType   = "BeaconBlock"
	SidecarType = "DataColumnSidecar"
)

type (
	// ConstructionPopulator is an interface that can be satisfied by a type that can use data from a struct
	// like a DataColumnSidecar or a BeaconBlock to set the fields in a data column sidecar that cannot
	// be obtained from the engine API.
	ConstructionPopulator interface {
		Slot() primitives.Slot
		Root() [fieldparams.RootLength]byte
		ProposerIndex() primitives.ValidatorIndex
		Commitments() ([][]byte, error)
		Type() string

		extract() (*blockInfo, error)
	}

	// BlockReconstructionSource is a ConstructionPopulator that uses a beacon block as the source of data
	BlockReconstructionSource struct {
		blocks.ROBlock
	}

	// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
	SidecarReconstructionSource struct {
		blocks.VerifiedRODataColumn
	}

	blockInfo struct {
		signedBlockHeader *ethpb.SignedBeaconBlockHeader
		kzgCommitments    [][]byte
		kzgInclusionProof [][]byte
	}
)

// PopulateFromBlock creates a BlockReconstructionSource from a beacon block
func PopulateFromBlock(block blocks.ROBlock) *BlockReconstructionSource {
	return &BlockReconstructionSource{ROBlock: block}
}

// PopulateFromSidecar creates a SidecarReconstructionSource from a data column sidecar
func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstructionSource {
	return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}

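// Usage sketch (added for illustration; the variable names are hypothetical):
// both sources plug into the same constructor, mirroring the calls that appear
// in the tests later in this diff.
//
//	// From a freshly produced block (e.g. after engine_getPayloadV5):
//	sidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromBlock(roBlock))
//	// From an already-verified sidecar (e.g. while reconstructing missing columns):
//	sidecars, err = DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(verifiedSidecar))
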
// ValidatorsCustodyRequirement returns the number of custody groups required given the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -28,3 +92,158 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
	count := totalNodeBalance / balancePerAdditionalCustodyGroup
	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}

// DataColumnSidecars, given a ConstructionPopulator and the cells/proofs associated with each blob in the
// block, assembles sidecars which can be distributed to peers.
// This is an adapted version of
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars,
// which is designed to be used both when constructing sidecars from a block and from a sidecar, replacing
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
	if len(rows) == 0 {
		return nil, nil
	}
	start := time.Now()
	cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
	if err != nil {
		return nil, errors.Wrap(err, "rotate cells and proofs")
	}
	info, err := src.extract()
	if err != nil {
		return nil, errors.Wrap(err, "extract block info")
	}

	maxIdx := params.BeaconConfig().NumberOfColumns
	roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
	for idx := range maxIdx {
		sidecar := &ethpb.DataColumnSidecar{
			Index:                        idx,
			Column:                       cells[idx],
			KzgCommitments:               info.kzgCommitments,
			KzgProofs:                    proofs[idx],
			SignedBlockHeader:            info.signedBlockHeader,
			KzgCommitmentsInclusionProof: info.kzgInclusionProof,
		}

		if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
			return nil, ErrSizeMismatch
		}

		roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
		if err != nil {
			return nil, errors.Wrap(err, "new ro data column")
		}
		roSidecars = append(roSidecars, roSidecar)
	}

	dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
	return roSidecars, nil
}
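
// Invariant note (added for clarity): every sidecar assembled above must carry
// exactly one cell and one proof per blob KZG commitment; the ErrSizeMismatch
// check enforces that per column before the sidecar is wrapped as an RODataColumn.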

// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
	return s.Block().Slot()
}

// ProposerIndex returns the proposer index of the source
func (s *BlockReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
	return s.Block().ProposerIndex()
}

// Commitments returns the blob KZG commitments of the source
func (s *BlockReconstructionSource) Commitments() ([][]byte, error) {
	c, err := s.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	return c, nil
}

// Type returns the type of the source
func (s *BlockReconstructionSource) Type() string {
	return BlockType
}

// extract extracts the block information from the source
func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
	block := b.Block()

	header, err := b.Header()
	if err != nil {
		return nil, errors.Wrap(err, "header")
	}

	commitments, err := block.Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "commitments")
	}

	inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
	if err != nil {
		return nil, errors.Wrap(err, "merkle proof kzg commitments")
	}

	info := &blockInfo{
		signedBlockHeader: header,
		kzgCommitments:    commitments,
		kzgInclusionProof: inclusionProof,
	}

	return info, nil
}

// rotateRowsToCols takes a 2D slice of cells and proofs, where x indexes rows (blobs) and y indexes columns,
// and returns a 2D slice where x indexes columns and y indexes rows.
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
	if len(rows) == 0 {
		return nil, nil, nil
	}
	cellCols := make([][][]byte, numCols)
	proofCols := make([][][]byte, numCols)
	for i, cp := range rows {
		if uint64(len(cp.Cells)) != numCols {
			return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
		}
		if len(cp.Cells) != len(cp.Proofs) {
			return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
		}
		for j := uint64(0); j < numCols; j++ {
			if i == 0 {
				cellCols[j] = make([][]byte, len(rows))
				proofCols[j] = make([][]byte, len(rows))
			}
			cellCols[j][i] = cp.Cells[j][:]
			proofCols[j][i] = cp.Proofs[j][:]
		}
	}
	return cellCols, proofCols, nil
}

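// Shape sketch (added for clarity): with rows holding 2 blobs and numCols = 4,
// rows[i].Cells[j] (blob-major) maps to cellCols[j][i] (column-major), so
// cellCols[1] = {rows[0].Cells[1][:], rows[1].Cells[1][:]}: one cell per blob.
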
// Root returns the block root of the source
func (s *SidecarReconstructionSource) Root() [fieldparams.RootLength]byte {
	return s.BlockRoot()
}

// Commitments returns the blob KZG commitments of the source
func (s *SidecarReconstructionSource) Commitments() ([][]byte, error) {
	return s.KzgCommitments, nil
}

// Type returns the type of the source
func (s *SidecarReconstructionSource) Type() string {
	return SidecarType
}

// extract extracts the block information from the source
func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
	info := &blockInfo{
		signedBlockHeader: s.SignedBlockHeader,
		kzgCommitments:    s.KzgCommitments,
		kzgInclusionProof: s.KzgCommitmentsInclusionProof,
	}

	return info, nil
}

@@ -3,11 +3,15 @@ package peerdas_test
import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestValidatorsCustodyRequirement(t *testing.T) {
@@ -53,3 +57,218 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
		})
	}
}

func TestDataColumnSidecars(t *testing.T) {
	t.Run("sizes mismatch", func(t *testing.T) {
		// Create a protobuf signed beacon block.
		signedBeaconBlockPb := util.NewBeaconBlockDeneb()

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs.
		cellsAndProofs := []kzg.CellsAndProofs{
			{
				Cells:  make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
				Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
			},
		}

		rob, err := blocks.NewROBlock(signedBeaconBlock)
		require.NoError(t, err)
		_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
		require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
	})

	t.Run("cells array too short for column index", func(t *testing.T) {
		// Create a Fulu block with a blob commitment.
		signedBeaconBlockPb := util.NewBeaconBlockFulu()
		signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs with insufficient cells for the number of columns.
		// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
		cellsAndProofs := []kzg.CellsAndProofs{
			{
				Cells:  make([]kzg.Cell, 10),  // Only 10 cells
				Proofs: make([]kzg.Proof, 10), // Only 10 proofs
			},
		}

		// This should fail because the function will try to access columns up to NumberOfColumns
		// but we only have 10 cells/proofs.
		rob, err := blocks.NewROBlock(signedBeaconBlock)
		require.NoError(t, err)
		_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
	})

	t.Run("proofs array too short for column index", func(t *testing.T) {
		// Create a Fulu block with a blob commitment.
		signedBeaconBlockPb := util.NewBeaconBlockFulu()
		signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

		// Create a signed beacon block from the protobuf.
		signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
		require.NoError(t, err)

		// Create cells and proofs with sufficient cells but insufficient proofs.
		numberOfColumns := params.BeaconConfig().NumberOfColumns
		cellsAndProofs := []kzg.CellsAndProofs{
			{
				Cells:  make([]kzg.Cell, numberOfColumns),
				Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
			},
		}

		// This should fail when trying to access proof beyond index 4.
		rob, err := blocks.NewROBlock(signedBeaconBlock)
		require.NoError(t, err)
		_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
		require.ErrorContains(t, "not enough proofs", err)
	})

t.Run("nominal", func(t *testing.T) {
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
|
||||
// Set different values to distinguish commitments
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
// Verify each sidecar has the expected structure
|
||||
for i, sidecar := range sidecars {
|
||||
require.Equal(t, uint64(i), sidecar.Index)
|
||||
require.Equal(t, 2, len(sidecar.Column))
|
||||
require.Equal(t, 2, len(sidecar.KzgCommitments))
|
||||
require.Equal(t, 2, len(sidecar.KzgProofs))
|
||||
|
||||
// Verify commitments match what we set
|
||||
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
|
||||
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
|
||||
|
||||
// Verify column data comes from the correct cells
|
||||
require.Equal(t, byte(i), sidecar.Column[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.Column[1][0])
|
||||
|
||||
// Verify proofs come from the correct proofs
|
||||
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructionSource(t *testing.T) {
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
|
||||
// Set different values to distinguish commitments
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
t.Run("from block", func(t *testing.T) {
|
||||
src := peerdas.PopulateFromBlock(rob)
|
||||
require.Equal(t, rob.Block().Slot(), src.Slot())
|
||||
require.Equal(t, rob.Root(), src.Root())
|
||||
require.Equal(t, rob.Block().ProposerIndex(), src.ProposerIndex())
|
||||
|
||||
commitments, err := src.Commitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(commitments))
|
||||
require.DeepEqual(t, commitment1, commitments[0])
|
||||
require.DeepEqual(t, commitment2, commitments[1])
|
||||
|
||||
require.Equal(t, peerdas.BlockType, src.Type())
|
||||
})
|
||||
|
||||
t.Run("from sidecar", func(t *testing.T) {
|
||||
referenceSidecar := blocks.NewVerifiedRODataColumn(sidecars[0])
|
||||
src := peerdas.PopulateFromSidecar(referenceSidecar)
|
||||
require.Equal(t, referenceSidecar.Slot(), src.Slot())
|
||||
require.Equal(t, referenceSidecar.BlockRoot(), src.Root())
|
||||
require.Equal(t, referenceSidecar.ProposerIndex(), src.ProposerIndex())
|
||||
|
||||
commitments, err := src.Commitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(commitments))
|
||||
require.DeepEqual(t, commitment1, commitments[0])
|
||||
require.DeepEqual(t, commitment2, commitments[1])
|
||||
|
||||
require.Equal(t, peerdas.SidecarType, src.Type())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
b "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -374,15 +375,18 @@ func ProcessBlockForStateRoot(
}

// This calls altair block operations.
func altairOperations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -393,7 +397,7 @@ func altairOperations(
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
@@ -401,15 +405,18 @@ func altairOperations(
}

// This calls phase 0 block operations.
func phase0Operations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process block attester slashings")
}
@@ -420,5 +427,9 @@ func phase0Operations(
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process deposits")
}
return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return st, nil
}

@@ -13,34 +13,55 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
)

// ExitInfo provides information about validator exits in the state.
type ExitInfo struct {
HighestExitEpoch primitives.Epoch
Churn uint64
TotalActiveBalance uint64
}

// ErrValidatorAlreadyExited is an error raised when trying to process an exit of
// an already exited validator
var ErrValidatorAlreadyExited = errors.New("validator already exited")

// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
// ExitInformation returns information about validator exits.
func ExitInformation(s state.BeaconState) *ExitInfo {
exitInfo := &ExitInfo{}

farFutureEpoch := params.BeaconConfig().FarFutureEpoch
currentEpoch := slots.ToEpoch(s.Slot())
totalActiveBalance := uint64(0)

err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > maxExitEpoch {
maxExitEpoch = e
churn = 1
} else if e == maxExitEpoch {
churn++
if e > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = e
exitInfo.Churn = 1
} else if e == exitInfo.HighestExitEpoch {
exitInfo.Churn++
}
}

// Calculate total active balance in the same loop
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
totalActiveBalance += val.EffectiveBalance()
}

return nil
})
_ = err
return

// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}

// InitiateValidatorExit takes in validator index and updates
@@ -64,59 +85,117 @@ func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, c
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
func InitiateValidatorExit(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, 0, err
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, validator.ExitEpoch, ErrValidatorAlreadyExited
return s, ErrValidatorAlreadyExited
}

// Compute exit queue epoch.
if s.Version() < version.Electra {
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)

if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
}
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, 0, err
return nil, err
}
}
validator.ExitEpoch = exitQueueEpoch
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, 0, err
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, 0, err
return nil, err
}
return s, exitQueueEpoch, nil
return s, nil
}

// InitiateValidatorExitForTotalBal has the same functionality as InitiateValidatorExit,
// the only difference being how total active balance is obtained. In InitiateValidatorExit
// it is calculated inside the function and in InitiateValidatorExitForTotalBal it's a
// function argument.
func InitiateValidatorExitForTotalBal(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
totalActiveBalance primitives.Gwei,
) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}

// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance, primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, err
}
}
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, err
}
return s, nil
}

func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, exitInfo *ExitInfo) error {
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if exitInfo.Churn >= currentChurn {
exitInfo.HighestExitEpoch, err = exitInfo.HighestExitEpoch.SafeAdd(1)
if err != nil {
return err
}
exitInfo.Churn = 1
} else {
exitInfo.Churn = exitInfo.Churn + 1
}
return nil
}

// SlashValidator slashes the malicious validator's balance and awards
@@ -152,9 +231,12 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
func SlashValidator(
ctx context.Context,
s state.BeaconState,
slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
slashedIdx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error

s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}

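Taken together, these hunks replace the old per-call MaxExitEpochAndChurn lookup with a single ExitInfo computed once and threaded through every exit and slashing operation. A minimal sketch of the intended call pattern, assembled only from the signatures visible above (the wrapper function and its loop are hypothetical; v and helpers are the import aliases from the transition hunk):

// Hypothetical wrapper illustrating the new single-pass pattern.
func slashAllSketch(ctx context.Context, st state.BeaconState, indices []primitives.ValidatorIndex) (state.BeaconState, error) {
	// One pass over the registry yields the highest exit epoch, the churn
	// at that epoch, and the total active balance.
	exitInfo := v.ExitInformation(st)
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return nil, err
	}
	for _, idx := range indices {
		// SlashValidator mutates exitInfo as validators enter the exit queue,
		// so later slashings in the same block see the updated churn.
		var err error
		st, err = v.SlashValidator(ctx, st, idx, exitInfo)
		if err != nil {
			return nil, err
		}
	}
	return st, nil
}
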
@@ -49,9 +49,11 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, 0, 199, 1)
exitInfo := &validators.ExitInfo{HighestExitEpoch: 199, Churn: 1}
newState, err := validators.InitiateValidatorExit(t.Context(), state, 0, exitInfo)
require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
require.Equal(t, exitEpoch, epoch)
assert.Equal(t, primitives.Epoch(199), exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(1), exitInfo.Churn)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")
@@ -68,9 +70,11 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 1)
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 1}
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
require.NoError(t, err)
require.Equal(t, exitedEpoch+2, epoch)
assert.Equal(t, exitedEpoch+2, exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(2), exitInfo.Churn)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")
@@ -88,9 +92,11 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 4)
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 4}
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)
assert.Equal(t, exitedEpoch+3, exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(1), exitInfo.Churn)

// Because of exit queue overflow,
// validator who init exited has to wait one more epoch.
@@ -110,7 +116,8 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, _, err = validators.InitiateValidatorExit(t.Context(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
exitInfo := &validators.ExitInfo{HighestExitEpoch: params.BeaconConfig().FarFutureEpoch - 1, Churn: 1}
_, err = validators.InitiateValidatorExit(t.Context(), state, 1, exitInfo)
require.ErrorContains(t, "addition overflows", err)
}

@@ -146,12 +153,11 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), ebtc)

newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, &validators.ExitInfo{}) // exit info is not used in electra
require.NoError(t, err)

// Expect that the exit epoch is the next available epoch with max seed lookahead.
want := helpers.ActivationExitEpoch(exitedEpoch + 1)
require.Equal(t, want, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")
@@ -190,7 +196,7 @@ func TestSlashValidator_OK(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)

@@ -244,7 +250,7 @@ func TestSlashValidator_Electra(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Electra)

@@ -505,8 +511,8 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := validators.MaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
exitInfo := validators.ExitInformation(s)
require.Equal(t, tt.wantedEpoch, exitInfo.HighestExitEpoch)
require.Equal(t, tt.wantedChurn, exitInfo.Churn)
}
}

@@ -122,18 +122,18 @@ type BlobStorage struct {
func (bs *BlobStorage) WarmCache() {
start := time.Now()
if bs.layoutName == LayoutNameFlat {
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
log.Info("Blob filesystem cache warm-up started. This may take a few minutes")
} else {
log.Info("Blob filesystem cache warm-up started.")
log.Info("Blob filesystem cache warm-up started")
}

if err := warmCache(bs.layout, bs.cache); err != nil {
log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
log.WithError(err).Error("Error encountered while warming up blob filesystem cache")
}
if err := bs.migrateLayouts(); err != nil {
log.WithError(err).Error("Error encountered while migrating blob storage.")
log.WithError(err).Error("Error encountered while migrating blob storage")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
}

// If any blob storage directories are found for layouts besides the configured layout, migrate them.

@@ -259,7 +259,6 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
}

// Save saves data column sidecars into the database and asynchronously performs pruning.
// The returned channel is closed when the pruning is complete.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
startTime := time.Now()

@@ -116,19 +116,43 @@ func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummar
}
// Roll up summaries and clean up per-epoch directories.
rollup := &pruneSummary{}

// Track which period directories might be empty after epoch removal
periodsToCheck := make(map[string]struct{})

for epoch, sum := range sums {
rollup.blobsPruned += sum.blobsPruned
rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
rmdir := l.epochDir(epoch)
periodDir := l.periodDir(epoch)

if len(sum.failedRemovals) == 0 {
if err := l.fs.Remove(rmdir); err != nil {
log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
} else {
periodsToCheck[periodDir] = struct{}{}
}
} else {
log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
}
}

// Clean up empty period directories
for periodDir := range periodsToCheck {
entries, err := afero.ReadDir(l.fs, periodDir)
if err != nil {
log.WithField("dir", periodDir).WithError(err).Debug("Failed to read period directory contents")
continue
}

// Only attempt to remove if directory is empty
if len(entries) == 0 {
if err := l.fs.Remove(periodDir); err != nil {
log.WithField("dir", periodDir).WithError(err).Error("Failed to remove empty period directory")
}
}
}

return rollup, nil
}

@@ -4,6 +4,7 @@ import (
"encoding/binary"
"os"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -195,3 +196,48 @@ func TestLayoutPruneBefore(t *testing.T) {
})
}
}

func TestLayoutByEpochPruneBefore(t *testing.T) {
roots := testRoots(10)
cases := []struct {
name string
pruned []testIdent
remain []testIdent
err error
sum pruneSummary
}{
{
name: "single epoch period cleanup",
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 367076, index: 0}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[1], epoch: 371176, index: 0}}, // Different period
},
sum: pruneSummary{blobsPruned: 1},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)

time.Sleep(1 * time.Second)
for _, id := range pruned {
_, err := fs.Stat(bs.layout.sszPath(id))
require.Equal(t, true, os.IsNotExist(err))

dirs := bs.layout.blockParentDirs(id)
for i := len(dirs) - 1; i > 0; i-- {
_, err = fs.Stat(dirs[i])
require.Equal(t, true, os.IsNotExist(err))
}
}
for _, id := range remain {
_, err := fs.Stat(bs.layout.sszPath(id))
require.NoError(t, err)
}
})
}
}

@@ -4,17 +4,26 @@ import (
"bytes"
"context"
"encoding/binary"
"time"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)

var errTimeOut = errors.New("operation timed out")

// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
_ context.Context, maxEpoch primitives.Epoch,
ctx context.Context, maxEpoch primitives.Epoch,
) (numPruned uint, err error) {
// In some cases, pruning may take a very long time and consume significant memory in the
// open Update transaction. Therefore, we impose a 1 minute timeout on this operation.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()

// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(encodedEndPruneEpoch, uint64(maxEpoch))
@@ -48,13 +57,18 @@ func (s *Store) PruneAttestationsAtEpoch(
return
}

if err = s.db.Update(func(tx *bolt.Tx) error {
err = s.db.Update(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
c := signingRootsBkt.Cursor()

// We begin a pruning iteration starting from the first item in the bucket.
for k, v := c.First(); k != nil; k, v = c.Next() {
if ctx.Err() != nil {
// Exit the routine if the context has expired.
return errTimeOut
}

// We check the epoch from the current key in the database.
// If we have hit an epoch that is greater than the end epoch of the pruning process,
// we then completely exit the process as we are done.
@@ -67,18 +81,27 @@ func (s *Store) PruneAttestationsAtEpoch(
// so it is possible we have a few adjacent objects that have the same slot, such as
// (target_epoch = 3 ++ _) => encode(attestation)
if err := signingRootsBkt.Delete(k); err != nil {
return err
return errors.Wrap(err, "delete attestation signing root")
}
if err := attRecordsBkt.Delete(v); err != nil {
return err
return errors.Wrap(err, "delete attestation record")
}
slasherAttestationsPrunedTotal.Inc()
numPruned++
}
return nil
}); err != nil {
})

if errors.Is(err, errTimeOut) {
log.Warning("Aborting pruning routine")
return
}

if err != nil {
log.WithError(err).Error("Failed to prune attestations")
return
}

return
}

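A step worth spelling out: the cursor scan above can stop at the first too-new key only because attestation keys are prefixed with the big-endian target epoch, and big-endian byte order agrees with numeric order. A tiny self-contained Go sketch of that property (standard library only; the slasher's key layout beyond the epoch prefix is not shown here):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode two target epochs the same way PruneAttestationsAtEpoch does.
	a := make([]byte, 8)
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(a, 3)  // target epoch 3
	binary.BigEndian.PutUint64(b, 10) // target epoch 10
	// bytes.Compare orders big-endian encodings the same way as the integers,
	// so a forward bolt cursor visits records in increasing epoch order.
	fmt.Println(bytes.Compare(a, b) < 0) // prints: true
}
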
@@ -25,6 +25,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -103,6 +104,7 @@ go_test(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",

@@ -7,6 +7,7 @@ import (
"strings"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -101,10 +102,7 @@ const (
defaultEngineTimeout = time.Second
)

var (
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
)
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")

// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
@@ -123,7 +121,7 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
}

// EngineCaller defines a client that can interact with an Ethereum
@@ -144,10 +142,9 @@ var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
defer func() {
defer func(start time.Time) {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
}(time.Now())

d := time.Now().Add(time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second)
ctx, cancel := context.WithDeadline(ctx, d)
@@ -185,7 +182,10 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
return nil, errors.New("unknown execution data type")
}
if result.ValidationError != "" {
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
log.WithField("status", result.Status.String()).
WithField("parentRoot", fmt.Sprintf("%#x", parentBlockRoot)).
WithError(errors.New(result.ValidationError)).
Error("Got a validation error in newPayload")
}
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
@@ -197,7 +197,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
case pb.PayloadStatus_VALID:
return result.LatestValidHash, nil
default:
return nil, ErrUnknownPayloadStatus
return nil, errors.Wrapf(ErrUnknownPayloadStatus, "unknown payload status: %s", result.Status.String())
}
}

@@ -651,22 +651,40 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}

// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
block := signedROBlock.Block()
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
root := populator.Root()

log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": block.Slot(),
})

kzgCommitments, err := block.Body().BlobKzgCommitments()
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
commitments, err := populator.Commitments()
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
return nil, wrapWithBlockRoot(err, root, "commitments")
}

cellsAndProofs, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
if err != nil {
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}

// Return early if nothing is returned from the EL.
if len(cellsAndProofs) == 0 {
return nil, nil
}

// Construct data column sidecars from the populator and the cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from column sidecar")
}

// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)

return verifiedROSidecars, nil
}

// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -677,47 +695,32 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
return nil, errors.Wrapf(err, "get blobs V2")
}

// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
log.Debug("No blobs returned from execution client")
return nil, nil
}

// Extract the blobs and proofs from the blobAndProofV2s.
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
for _, blobsAndProofs := range blobAndProofV2s {
if blobsAndProofs == nil {
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
}

blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
}

// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
return nil, errors.Wrap(err, "compute cells and proofs")
}

// Finally, construct verified RO data column sidecars.
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}
return cellsAndProofs, nil
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
func upgradeSidecarsToVerifiedSidecars(roSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, roSidecar := range roSidecars {
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}

log.Debug("Data columns successfully reconstructed from the execution client")

return verifiedRODataColumns, nil
return verifiedRODataColumns
}

func fullPayloadFromPayloadBody(
@@ -1009,6 +1012,6 @@ func toBlockNumArg(number *big.Int) string {
}

// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
func wrapWithBlockRoot(err error, blockRoot [fieldparams.RootLength]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
}

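The net effect of this refactor is that sidecar construction no longer starts from a signed block; any source that can supply a root, slot, proposer index, and commitments will do. A short sketch of the two call sites implied by the populators exercised in the tests below (variables such as signedBlock, blockRoot, and verifiedSidecar are placeholders):

// From a block: wrap it as a read-only block and let the populator pull
// the root, slot, proposer index, and KZG commitments from it.
roBlock, err := blocks.NewROBlockWithRoot(signedBlock, blockRoot)
if err != nil {
	return err
}
sidecars, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
if err != nil {
	return err
}

// From a previously verified sidecar: reuse its header and commitments,
// e.g. when re-deriving columns for the same block without the full block.
moreSidecars, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(verifiedSidecar))
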
@@ -14,6 +14,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -927,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(BlockByNumberMethod, func(t *testing.T) {
@@ -2556,7 +2557,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
})
}

func TestReconstructDataColumnSidecars(t *testing.T) {
func TestConstructDataColumnSidecars(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
@@ -2580,11 +2581,14 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

roBlock, err := blocks.NewROBlockWithRoot(sb, r)
require.NoError(t, err)

ctx := context.Background()

t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})

t.Run("nothing received", func(t *testing.T) {
@@ -2594,7 +2598,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2607,23 +2611,22 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

t.Run("missing some blobs", func(t *testing.T) {
blobMasks := []bool{false, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()
// t.Run("missing some blobs", func(t *testing.T) {
// blobMasks := []bool{false, true, true, true, true, true}
// srv := createBlobServerV2(t, 6, blobMasks)
// defer srv.Close()

rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
// rpcClient, client := setupRpcClientV2(t, srv.URL, client)
// defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
require.Equal(t, 0, len(dataColumns))
})
// _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
// require.ErrorContains(t, "fetch cells and proofs from execution client", err)
// })
}

func createRandomKzgCommitments(t *testing.T, num int) [][]byte {

2 beacon-chain/execution/testdata/fuzz/FuzzForkChoiceResponse/a0ab8cde398c202b vendored Normal file
@@ -0,0 +1,2 @@
go test fuzz v1
[]byte("{\"pAYloAdStAtus\":{\"0000000000000\":\"0000000000000000000000\",\"witness\":\"0\"}}")
@@ -14,6 +14,7 @@ go_library(
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

@@ -4,6 +4,7 @@ import (
"context"
"math/big"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -116,7 +117,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
return e.BlobSidecars, e.ErrorBlobSidecars
}

func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}

@@ -463,22 +463,20 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
}

// InsertChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also, we assume that the parent of the
// last block in this list is already included in forkchoice store.
// `blocks`. This slice must be ordered in increasing slot order and
// each consecutive entry must be a child of the previous one.
// The parent of the first block in this list must already be present in forkchoice.
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
for i := len(chain) - 1; i > 0; i-- {
for _, bcp := range chain {
if _, err := f.store.insert(ctx,
chain[i].Block,
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
bcp.Block,
bcp.JustifiedCheckpoint.Epoch, bcp.FinalizedCheckpoint.Epoch); err != nil {
return err
}
if err := f.updateCheckpoints(ctx, chain[i].JustifiedCheckpoint, chain[i].FinalizedCheckpoint); err != nil {
if err := f.updateCheckpoints(ctx, bcp.JustifiedCheckpoint, bcp.FinalizedCheckpoint); err != nil {
return err
}
}

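The doc-comment rewrite above is the contract change to notice: callers now pass the chain oldest-first instead of newest-first. A small sketch of a conforming call (the block and checkpoint values are placeholders):

// chain[0] is the oldest block; every later entry is a child of the one
// before it, and the parent of chain[0] must already be in forkchoice.
chain := []*forkchoicetypes.BlockAndCheckpoints{
	{Block: parentBlock, JustifiedCheckpoint: jc, FinalizedCheckpoint: fc},
	{Block: childBlock, JustifiedCheckpoint: jc, FinalizedCheckpoint: fc},
}
if err := f.InsertChain(ctx, chain); err != nil {
	return err
}
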
@@ -630,14 +630,15 @@ func TestStore_InsertChain(t *testing.T) {
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
}
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
for i := 0; i < len(blks); i++ {
args[i] = blks[10-i-1]
}
require.NoError(t, f.InsertChain(t.Context(), args))
// InsertChain now expects blocks in increasing slot order
require.NoError(t, f.InsertChain(t.Context(), blks))

// Test partial insertion: first insert the foundation blocks, then a subset
f = setup(1, 1)
require.NoError(t, f.InsertChain(t.Context(), args[2:]))
// Insert first 2 blocks to establish a chain from genesis
require.NoError(t, f.InsertChain(t.Context(), blks[:2]))
// Then insert the remaining blocks
require.NoError(t, f.InsertChain(t.Context(), blks[2:]))
}

func TestForkChoice_UpdateCheckpoints(t *testing.T) {

@@ -3,11 +3,12 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cache.go",
"helpers.go",
"lightclient.go",
"store.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client",
visibility = ["//visibility:public"],
deps = [
"//async/event:go_default_library",
@@ -38,25 +39,30 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cache_test.go",
"lightclient_test.go",
"store_test.go",
],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//async/event:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
26 beacon-chain/light-client/cache.go Normal file
@@ -0,0 +1,26 @@
package light_client

import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// cache tracks LC data over the non finalized chain for different branches.
type cache struct {
items map[[32]byte]*cacheItem
}

// cacheItem represents the LC data for a block. It tracks the best update and finality update seen in that branch.
type cacheItem struct {
parent *cacheItem // parent item in the cache, can be nil
period uint64 // sync committee period
slot primitives.Slot // slot of the signature block
bestUpdate interfaces.LightClientUpdate
bestFinalityUpdate interfaces.LightClientFinalityUpdate
}

func newLightClientCache() *cache {
return &cache{
items: make(map[[32]byte]*cacheItem),
}
}
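
Since each cacheItem holds a pointer to its parent, the cache can track several competing non-finalized branches at once and walk any of them back toward the finalized checkpoint. A small hedged sketch of linking a child item to its parent (the roots and period are placeholders, not values from this change):

lcCache := newLightClientCache()
parentRoot, childRoot := [32]byte{1}, [32]byte{2}
lcCache.items[parentRoot] = &cacheItem{period: 5}
// The child keeps a pointer to its parent, so following parent pointers
// recovers the branch a given block belongs to.
lcCache.items[childRoot] = &cacheItem{
	parent: lcCache.items[parentRoot],
	period: 5,
}
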
24 beacon-chain/light-client/cache_test.go Normal file
@@ -0,0 +1,24 @@
package light_client

import (
"testing"

"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestLCCache(t *testing.T) {
lcCache := newLightClientCache()
require.NotNil(t, lcCache)

item := &cacheItem{
period: 5,
bestUpdate: nil,
bestFinalityUpdate: nil,
}

blkRoot := [32]byte{4, 5, 6}

lcCache.items[blkRoot] = item

require.Equal(t, item, lcCache.items[blkRoot], "Expected to find the item in the cache")
}
@@ -592,6 +592,10 @@ func HasFinality(update interfaces.LightClientUpdate) (bool, error) {
}

func IsBetterUpdate(newUpdate, oldUpdate interfaces.LightClientUpdate) (bool, error) {
    if oldUpdate == nil || oldUpdate.IsNil() {
        return true, nil
    }

    maxActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Len()
    newNumActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Count()
    oldNumActiveParticipants := oldUpdate.SyncAggregate().SyncCommitteeBits.Count()
@@ -778,7 +782,7 @@ func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClie
// This does not concern broadcasting, but rather the decision of whether to save the new update.
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
    if oldUpdate == nil {
    if oldUpdate == nil || oldUpdate.IsNil() {
        return true
    }

@@ -804,7 +808,7 @@ func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityU
}

func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
    if oldUpdate == nil {
    if oldUpdate == nil || oldUpdate.IsNil() {
        return true
    }
    // The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
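The added oldUpdate.IsNil() guard matters because a Go interface that wraps a typed nil pointer is itself non-nil, so a bare == nil check misses it. A self-contained sketch of the pitfall (demoUpdate is a hypothetical stand-in for the concrete update types):

    package main

    import "fmt"

    type demoUpdate struct{} // hypothetical concrete type behind the interface

    func (d *demoUpdate) IsNil() bool { return d == nil }

    func main() {
        var iface interface{ IsNil() bool }
        var ptr *demoUpdate // nil pointer
        iface = ptr         // iface now wraps a typed nil
        fmt.Println(iface == nil)  // false: the interface itself is non-nil
        fmt.Println(iface.IsNil()) // true: the extra guard catches it
    }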
@@ -9,7 +9,7 @@ import (
    light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
    "github.com/OffchainLabs/prysm/v6/runtime/version"

    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    consensustypes "github.com/OffchainLabs/prysm/v6/consensus-types"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
beacon-chain/light-client/store.go (new file, 383 lines)
@@ -0,0 +1,383 @@
package light_client

import (
    "context"
    "sync"

    "github.com/OffchainLabs/prysm/v6/async/event"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")

type Store struct {
    mu sync.RWMutex

    beaconDB             iface.HeadAccessDatabase
    lastFinalityUpdate   interfaces.LightClientFinalityUpdate   // tracks the best finality update seen so far
    lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
    p2p                  p2p.Accessor
    stateFeed            event.SubscriberSender
    cache                *cache // non-finality cache
}

func NewLightClientStore(p p2p.Accessor, e event.SubscriberSender, db iface.HeadAccessDatabase) *Store {
    return &Store{
        beaconDB:  db,
        p2p:       p,
        stateFeed: e,
        cache:     newLightClientCache(),
    }
}

func (s *Store) SaveLCData(ctx context.Context,
    state state.BeaconState,
    block interfaces.ReadOnlySignedBeaconBlock,
    attestedState state.BeaconState,
    attestedBlock interfaces.ReadOnlySignedBeaconBlock,
    finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
    headBlockRoot [32]byte) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    // compute the required data
    update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
    if err != nil {
        return errors.Wrapf(err, "failed to create light client update")
    }
    finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
    if err != nil {
        return errors.Wrapf(err, "failed to create light client finality update")
    }
    optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock)
    if err != nil {
        return errors.Wrapf(err, "failed to create light client optimistic update")
    }
    period := slots.SyncCommitteePeriod(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    blockRoot, err := attestedBlock.Block().HashTreeRoot()
    if err != nil {
        return errors.Wrapf(err, "failed to compute attested block root")
    }
    parentRoot := [32]byte(update.AttestedHeader().Beacon().ParentRoot)
    signatureBlockRoot, err := block.Block().HashTreeRoot()
    if err != nil {
        return errors.Wrapf(err, "failed to compute signature block root")
    }

    newBlockIsHead := signatureBlockRoot == headBlockRoot

    // create the new cache item
    newCacheItem := &cacheItem{
        period: period,
        slot:   attestedBlock.Block().Slot(),
    }

    // check if the parent exists in the cache
    parentItem, ok := s.cache.items[parentRoot]
    if ok {
        newCacheItem.parent = parentItem
    } else {
        // If not, create an item for the parent. It doesn't need to be saved: it only holds the accumulated best updates and is used for comparison.
        bestUpdateSoFar, err := s.beaconDB.LightClientUpdate(ctx, period)
        if err != nil {
            return errors.Wrapf(err, "could not get best light client update for period %d", period)
        }
        parentItem = &cacheItem{
            period:             period,
            bestUpdate:         bestUpdateSoFar,
            bestFinalityUpdate: s.lastFinalityUpdate,
        }
    }

    // if at a period boundary, there is no need to compare data; just save the new updates
    if parentItem.period != period {
        newCacheItem.bestUpdate = update
        newCacheItem.bestFinalityUpdate = finalityUpdate
        s.cache.items[blockRoot] = newCacheItem

        s.setLastOptimisticUpdate(optimisticUpdate, true)

        // if the new block is not head, we don't want to change our lastFinalityUpdate
        if newBlockIsHead {
            s.setLastFinalityUpdate(finalityUpdate, true)
        }

        return nil
    }

    // if in the same period, compare updates
    isUpdateBetter, err := IsBetterUpdate(update, parentItem.bestUpdate)
    if err != nil {
        return errors.Wrapf(err, "could not compare light client updates")
    }
    if isUpdateBetter {
        newCacheItem.bestUpdate = update
    } else {
        newCacheItem.bestUpdate = parentItem.bestUpdate
    }

    isBetterFinalityUpdate := IsBetterFinalityUpdate(finalityUpdate, parentItem.bestFinalityUpdate)
    if isBetterFinalityUpdate {
        newCacheItem.bestFinalityUpdate = finalityUpdate
    } else {
        newCacheItem.bestFinalityUpdate = parentItem.bestFinalityUpdate
    }

    // save the new item in the cache
    s.cache.items[blockRoot] = newCacheItem

    // save lastOptimisticUpdate if better
    if isBetterOptimisticUpdate := IsBetterOptimisticUpdate(optimisticUpdate, s.lastOptimisticUpdate); isBetterOptimisticUpdate {
        s.setLastOptimisticUpdate(optimisticUpdate, true)
    }

    // if the new block is considered the head, set the last finality update
    if newBlockIsHead {
        s.setLastFinalityUpdate(newCacheItem.bestFinalityUpdate, isBetterFinalityUpdate)
    }

    return nil
}
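Condensing SaveLCData's core decision into one helper makes the two branches easier to see; chooseBestUpdate is a hypothetical distillation for illustration, not part of the diff:

    // chooseBestUpdate mirrors SaveLCData's logic for the best-update field:
    // at a period boundary the new update starts fresh; within a period it must
    // beat the parent's accumulated best.
    func chooseBestUpdate(parent *cacheItem, period uint64, update interfaces.LightClientUpdate) (interfaces.LightClientUpdate, error) {
        if parent.period != period {
            return update, nil // period boundary: no comparison needed
        }
        better, err := IsBetterUpdate(update, parent.bestUpdate)
        if err != nil {
            return nil, err
        }
        if better {
            return update, nil
        }
        return parent.bestUpdate, nil
    }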
func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Fetch the light client bootstrap from the database
    bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
    if err != nil {
        return nil, err
    }
    if bootstrap == nil { // not found
        return nil, ErrLightClientBootstrapNotFound
    }

    return bootstrap, nil
}

func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, state state.BeaconState) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    blk, err := s.beaconDB.Block(ctx, blockRoot)
    if err != nil {
        return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
    }
    if blk == nil {
        return errors.Errorf("nil block for root %x", blockRoot)
    }

    bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
    if err != nil {
        return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
    }

    // Save the light client bootstrap to the database
    if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
        return err
    }
    return nil
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.LightClientUpdate, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Fetch the light client updates map from the database
    updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get updates from the database")
    }

    cacheUpdatesByPeriod, err := s.getCacheUpdatesByPeriod(headBlock)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get updates from cache")
    }

    for period, update := range cacheUpdatesByPeriod {
        updatesMap[period] = update
    }

    var updates []interfaces.LightClientUpdate

    for i := startPeriod; i <= endPeriod; i++ {
        update, ok := updatesMap[i]
        if !ok {
            // Only return the first contiguous range of updates
            break
        }
        updates = append(updates, update)
    }

    return updates, nil
}
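A brief usage sketch of the contiguous-range behavior (the periods are illustrative): if the database and cache together cover periods 3 and 5 but not 4, a request for 3..5 returns only period 3.

    func exampleRange(ctx context.Context, store *Store, headBlock interfaces.ReadOnlySignedBeaconBlock) {
        updates, err := store.LightClientUpdates(ctx, 3, 5, headBlock)
        if err != nil {
            log.WithError(err).Error("could not fetch light client updates")
            return
        }
        // If period 4 is missing, len(updates) == 1: the scan stops at the first gap.
        _ = updates
    }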
func (s *Store) LightClientUpdate(ctx context.Context, period uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error) {
    // we don't need to lock here because the LightClientUpdates method locks the store
    updates, err := s.LightClientUpdates(ctx, period, period, headBlock)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get light client update for period %d", period)
    }
    if len(updates) == 0 {
        return nil, nil
    }
    return updates[0], nil
}

func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaconBlock) (map[uint64]interfaces.LightClientUpdate, error) {
    updatesByPeriod := make(map[uint64]interfaces.LightClientUpdate)

    cacheHeadRoot := headBlock.Block().ParentRoot()

    cacheHeadItem, ok := s.cache.items[cacheHeadRoot]
    if !ok {
        log.Debugf("Head root %x not found in light client cache. Returning empty updates map for non-finality cache.", cacheHeadRoot)
        return updatesByPeriod, nil
    }

    for cacheHeadItem != nil {
        if _, exists := updatesByPeriod[cacheHeadItem.period]; !exists {
            updatesByPeriod[cacheHeadItem.period] = cacheHeadItem.bestUpdate
        }
        cacheHeadItem = cacheHeadItem.parent
    }

    return updatesByPeriod, nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
    s.mu.Lock()
    defer s.mu.Unlock()

    s.setLastFinalityUpdate(update, broadcast)
}

func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
    if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
        if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
            log.WithError(err).Error("Could not broadcast light client finality update")
        }
    }

    s.lastFinalityUpdate = update
    log.Debug("Saved new light client finality update")

    s.stateFeed.Send(&feed.Event{
        Type: statefeed.LightClientFinalityUpdate,
        Data: update,
    })
}
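One ordering subtlety worth flagging in setLastFinalityUpdate: broadcast validity is evaluated against the previous s.lastFinalityUpdate before it is overwritten, so the assignment must stay after the check. Conceptually:

    prev := s.lastFinalityUpdate
    if broadcast && IsFinalityUpdateValidForBroadcast(update, prev) {
        // gossip the update
    }
    s.lastFinalityUpdate = update // only after comparing against prev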
func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
    s.mu.Lock()
    defer s.mu.Unlock()

    s.setLastOptimisticUpdate(update, broadcast)
}

func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
    if broadcast {
        if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
            log.WithError(err).Error("Could not broadcast light client optimistic update")
        }
    }

    s.lastOptimisticUpdate = update
    log.Debug("Saved new light client optimistic update")

    s.stateFeed.Send(&feed.Event{
        Type: statefeed.LightClientOptimisticUpdate,
        Data: update,
    })
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.lastOptimisticUpdate
}

func (s *Store) MigrateToCold(ctx context.Context, finalizedRoot [32]byte) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    // If the cache is empty (some problem in processing data), we can skip migration.
    // This is a safety check and should not happen in normal operation.
    if len(s.cache.items) == 0 {
        log.Debug("Non-finality cache is empty. Skipping migration.")
        return nil
    }

    blk, err := s.beaconDB.Block(ctx, finalizedRoot)
    if err != nil {
        return errors.Wrapf(err, "failed to fetch block for finalized root %x", finalizedRoot)
    }
    if blk == nil {
        return errors.Errorf("nil block for finalized root %x", finalizedRoot)
    }
    finalizedSlot := blk.Block().Slot()
    finalizedCacheHeadRoot := blk.Block().ParentRoot()

    var finalizedCacheHead *cacheItem
    var ok bool

    finalizedCacheHead, ok = s.cache.items[finalizedCacheHeadRoot]
    if !ok {
        log.Debugf("Finalized block's parent root %x not found in light client cache. Cleaning the broken part of the cache.", finalizedCacheHeadRoot)

        // delete non-finality cache items older than the finalized slot
        s.cleanCache(finalizedSlot)

        return nil
    }

    updateByPeriod := make(map[uint64]interfaces.LightClientUpdate)
    // Traverse the cache from the head item to the tail, collecting updates
    for item := finalizedCacheHead; item != nil; item = item.parent {
        if _, seen := updateByPeriod[item.period]; seen {
            // We already have an update for this period, skip this item
            continue
        }
        updateByPeriod[item.period] = item.bestUpdate
    }

    // save updates to db
    for period, update := range updateByPeriod {
        err = s.beaconDB.SaveLightClientUpdate(ctx, period, update)
        if err != nil {
            log.WithError(err).Errorf("failed to save light client update for period %d. Skipping this period.", period)
        }
    }

    // delete non-finality cache items older than the finalized slot
    s.cleanCache(finalizedSlot)

    return nil
}

func (s *Store) cleanCache(finalizedSlot primitives.Slot) {
    // delete non-finality cache items older than the finalized slot
    for k, v := range s.cache.items {
        if v.slot < finalizedSlot {
            delete(s.cache.items, k)
        }
        if v.parent != nil && v.parent.slot < finalizedSlot {
            v.parent = nil // remove parent reference
        }
    }
}
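A small in-package sketch of cleanCache's effect (roots and slots are illustrative): items strictly below the finalized slot are dropped, and survivors whose parent fell below it are detached so the pruned chain can be garbage collected.

    c := newLightClientCache()
    old := &cacheItem{slot: 30}
    kept := &cacheItem{slot: 34, parent: old}
    c.items[[32]byte{1}] = old
    c.items[[32]byte{2}] = kept
    s := &Store{cache: c}
    s.cleanCache(32)
    // len(s.cache.items) == 1 and kept.parent == nil afterwards.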
beacon-chain/light-client/store_test.go (new file, 959 lines)
@@ -0,0 +1,959 @@
package light_client

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/async/event"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
    testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/OffchainLabs/prysm/v6/time/slots"
)

func TestLightClientStore(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.AltairForkEpoch = 1
    cfg.BellatrixForkEpoch = 2
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    params.OverrideBeaconConfig(cfg)

    // Initialize the light client store
    lcStore := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))

    // Create test light client updates for Capella and Deneb
    lCapella := util.NewTestLightClient(t, version.Capella)
    opUpdateCapella, err := NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
    require.NoError(t, err)
    require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
    finUpdateCapella, err := NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
    require.NoError(t, err)
    require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")

    lDeneb := util.NewTestLightClient(t, version.Deneb)
    opUpdateDeneb, err := NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
    require.NoError(t, err)
    require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
    finUpdateDeneb, err := NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
    require.NoError(t, err)
    require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")

    // Initially the store should have nil values for both updates
    require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
    require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

    // Set and get finality with Capella update. Optimistic update should be nil
    lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
    require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
    require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

    // Set and get optimistic with Capella update. Finality update should be Capella
    lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
    require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
    require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")

    // Set and get finality and optimistic with Deneb update
    lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
    lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
    require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
    require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
}
func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
    p2p := p2pTesting.NewTestP2P(t)
    lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))

    // update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
    l0 := util.NewTestLightClient(t, version.Altair)
    update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
    // update0 should be valid for broadcast - meaning it should be broadcast
    require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update0, true)
    require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
    p2p.BroadcastCalled.Store(false) // Reset for next test

    // update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
    l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
    update1, err := NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
    // update1 should not be valid for broadcast - meaning it should not be broadcast
    require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update1, true)
    require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
    p2p.BroadcastCalled.Store(false) // Reset for next test

    // update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
    l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
    update2, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
    // update2 should be valid for broadcast - meaning it should be broadcast
    require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update2, true)
    require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
    p2p.BroadcastCalled.Store(false) // Reset for next test

    // update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
    l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority(0))
    update3, err := NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
    // update3 should not be valid for broadcast - meaning it should not be broadcast
    require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update3, true)
    require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when the previous update was already broadcast")

    // update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
    l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0))
    update4, err := NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
    // update4 should be valid for broadcast - meaning it should be broadcast
    require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update4, true)
    require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
    p2p.BroadcastCalled.Store(false) // Reset for next test

    // update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
    l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
    update5, err := NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
    // update5 should not be valid for broadcast - meaning it should not be broadcast
    require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update5, true)
    require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

    // update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
    l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
    update6, err := NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
    require.NoError(t, err, "Failed to create light client finality update")

    require.Equal(t, true, IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
    // update6 should not be valid for broadcast - meaning it should not be broadcast
    require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")

    lcStore.SetLastFinalityUpdate(update6, true)
    require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
    require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}
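Taken together, the assertions above suggest the broadcast rule: a finality update is gossiped when the previous one was nil, when the finalized slot increases, or when supermajority participation is first reached for a given finalized slot; otherwise it is only saved. (This is an inference from the test expectations, not a statement of the helper's full spec.)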
func TestLightClientStore_SaveLCData(t *testing.T) {
    t.Run("no parent in cache or db - new is head", func(t *testing.T) {
        db := testDB.SetupDB(t)
        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
        require.NotNil(t, s)

        l := util.NewTestLightClient(t, version.Altair)

        blkRoot, err := l.Block.Block().HashTreeRoot()
        require.NoError(t, err)

        require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")

        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
        require.NoError(t, err)
        attestedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
        require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
        require.DeepEqual(t, update, s.cache.items[attestedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
        require.DeepEqual(t, finalityUpdate, s.cache.items[attestedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
    })

    t.Run("no parent in cache or db - new not head", func(t *testing.T) {
        db := testDB.SetupDB(t)
        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
        require.NotNil(t, s)

        l := util.NewTestLightClient(t, version.Altair)

        blkRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")

        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
        require.NoError(t, err)
        attestedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.IsNil(t, s.lastFinalityUpdate, "Expected to not find the last finality update in the store since the block is not head")
        require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
        require.DeepEqual(t, update, s.cache.items[attestedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
        require.DeepEqual(t, finalityUpdate, s.cache.items[attestedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
    })

    t.Run("parent in db", func(t *testing.T) {
        db := testDB.SetupDB(t)
        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
        require.NotNil(t, s)

        l := util.NewTestLightClient(t, version.Altair)

        // save an update for this period in db
        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        require.NoError(t, db.SaveLightClientUpdate(l.Ctx, period, update), "Failed to save light client update in db")

        l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should all be better

        blkRoot, err := l2.Block.Block().HashTreeRoot()
        require.NoError(t, err)

        require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")

        update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
        require.NoError(t, err)
        attestedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
        require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
        require.DeepEqual(t, update, s.cache.items[attestedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
        require.DeepEqual(t, finalityUpdate, s.cache.items[attestedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
    })

    t.Run("parent in cache", func(t *testing.T) {
        db := testDB.SetupDB(t)
        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
        require.NotNil(t, s)

        l := util.NewTestLightClient(t, version.Altair)
        l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should all be better

        // save the cache item for this period in cache
        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        item := &cacheItem{
            period:             period,
            bestUpdate:         update,
            bestFinalityUpdate: finalityUpdate,
        }
        attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
        s.cache.items[attestedBlockRoot] = item

        blkRoot, err := l2.Block.Block().HashTreeRoot()
        require.NoError(t, err)

        require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")

        update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
        require.NoError(t, err)
        attestedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
        require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
        require.DeepEqual(t, update, s.cache.items[attestedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
        require.DeepEqual(t, finalityUpdate, s.cache.items[attestedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
    })

    t.Run("parent in the previous period", func(t *testing.T) {
        db := testDB.SetupDB(t)
        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
        require.NotNil(t, s)

        l := util.NewTestLightClient(t, version.Altair)
        l2 := util.NewTestLightClient(t, version.Bellatrix, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should all be better

        // save the cache item for period1 in cache
        period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        item := &cacheItem{
            period:             period1,
            bestUpdate:         update,
            bestFinalityUpdate: finalityUpdate,
        }
        attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
        s.cache.items[attestedBlockRoot] = item

        blkRoot, err := l2.Block.Block().HashTreeRoot()
        require.NoError(t, err)

        require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")

        update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
        require.NoError(t, err)
        optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
        require.NoError(t, err)
        attestedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
        require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
        require.DeepEqual(t, update, s.cache.items[attestedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
        require.DeepEqual(t, finalityUpdate, s.cache.items[attestedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
    })
}
func TestLightClientStore_MigrateToCold(t *testing.T) {
    // This tests the scenario where the chain advances but the cache is empty.
    // It should see that there is nothing in the cache to migrate and just update the tail to the new finalized root.
    t.Run("empty cache", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        ctx := context.Background()

        finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)

        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
        require.NotNil(t, s)

        for i := 0; i < 3; i++ {
            newBlock := util.NewBeaconBlock()
            newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
            newBlock.Block.ParentRoot = finalizedBlockRoot[:]
            signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
            require.NoError(t, err)
            blockRoot, err := signedNewBlock.Block().HashTreeRoot()
            require.NoError(t, err)
            require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
            finalizedBlockRoot = blockRoot
        }

        err := s.MigrateToCold(ctx, finalizedBlockRoot)
        require.NoError(t, err)
        require.Equal(t, 0, len(s.cache.items))
    })

    // This tests the scenario where the chain advances but the CANONICAL cache is empty.
    // It should see that there is nothing in the canonical cache to migrate, update the tail to the new finalized root, AND delete anything non-canonical.
    t.Run("non canonical cache", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        ctx := context.Background()

        finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)

        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
        require.NotNil(t, s)

        for i := 0; i < 3; i++ {
            newBlock := util.NewBeaconBlock()
            newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
            newBlock.Block.ParentRoot = finalizedBlockRoot[:]
            signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
            require.NoError(t, err)
            blockRoot, err := signedNewBlock.Block().HashTreeRoot()
            require.NoError(t, err)
            require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
            finalizedBlockRoot = blockRoot
        }

        // Add a non-canonical item to the cache
        cacheItem := &cacheItem{
            period: 0,
            slot:   33,
        }
        nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
        s.cache.items[nonCanonicalBlockRoot] = cacheItem

        require.Equal(t, 1, len(s.cache.items))

        err := s.MigrateToCold(ctx, finalizedBlockRoot)
        require.NoError(t, err)
        require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
    })

    // The db has an update, and the cache has both canonical and non-canonical items.
    // The update in the db should be overwritten and the cache cleared.
    t.Run("mixed cache - finality immediately after cache", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        ctx := context.Background()

        finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
        require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
        require.NotNil(t, s)

        // Save an update for this period in db
        l := util.NewTestLightClient(t, version.Altair)
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))

        lastBlockRoot := finalizedBlockRoot
        lastAttestedRoot := finalizedBlockRoot
        lastUpdate := update
        for i := 1; i < 4; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            lastBlockRoot, err = l.Block.Block().HashTreeRoot()
            require.NoError(t, err)
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
            update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            lastUpdate = update
        }

        require.Equal(t, 3, len(s.cache.items))

        // Add a non-canonical item to the cache
        cacheItem := &cacheItem{
            period: 0,
            slot:   33,
        }
        nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
        s.cache.items[nonCanonicalBlockRoot] = cacheItem

        require.Equal(t, 4, len(s.cache.items))

        err = s.MigrateToCold(ctx, lastBlockRoot)
        require.NoError(t, err)
        require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
        u, err := beaconDB.LightClientUpdate(ctx, period)
        require.NoError(t, err)
        require.NotNil(t, u)
        require.DeepEqual(t, lastUpdate, u)
    })

    // The db has an update, and the cache has both canonical and non-canonical items; the finalized height is in the middle.
    // The update in the db should be overwritten and cache items before the finalized slot deleted.
    t.Run("mixed cache - finality middle of cache", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        ctx := context.Background()

        finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
        require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
        require.NotNil(t, s)

        // Save an update for this period in db
        l := util.NewTestLightClient(t, version.Altair)
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))

        lastBlockRoot := finalizedBlockRoot
        lastUpdate := update
        lastAttestedRoot := [32]byte{}
        for i := 1; i < 4; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            root, err := l.Block.Block().HashTreeRoot()
            require.NoError(t, err)
            lastBlockRoot = root
            update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            lastUpdate = update
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
        }

        require.Equal(t, 3, len(s.cache.items))

        // Add a non-canonical item to the cache
        cacheItem := &cacheItem{
            period: 0,
            slot:   33,
        }
        nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
        s.cache.items[nonCanonicalBlockRoot] = cacheItem

        require.Equal(t, 4, len(s.cache.items))

        for i := 4; i < 7; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
        }

        require.Equal(t, 7, len(s.cache.items))

        err = s.MigrateToCold(ctx, lastBlockRoot)
        require.NoError(t, err)
        require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
        u, err := beaconDB.LightClientUpdate(ctx, period)
        require.NoError(t, err)
        require.NotNil(t, u)
        require.DeepEqual(t, lastUpdate, u)
    })

    // We have multiple periods in the cache before finalization happens. We expect all of them to be saved in db.
    t.Run("finality after multiple periods in cache", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        ctx := context.Background()

        cfg := params.BeaconConfig().Copy()
        cfg.EpochsPerSyncCommitteePeriod = 1
        params.OverrideBeaconConfig(cfg)

        finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
        require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))

        s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
        require.NotNil(t, s)

        // Save an update for period1 in db
        l := util.NewTestLightClient(t, version.Altair)
        update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)
        period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period1, update))

        lastBlockRoot := finalizedBlockRoot
        lastUpdatePeriod1 := update
        lastAttestedRoot := [32]byte{}
        for i := 1; i < 4; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            root, err := l.Block.Block().HashTreeRoot()
            require.NoError(t, err)
            lastBlockRoot = root
            lastUpdatePeriod1, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
        }

        period2 := period1
        var lastUpdatePeriod2 interfaces.LightClientUpdate
        for i := 1; i < 4; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            root, err := l.Block.Block().HashTreeRoot()
            require.NoError(t, err)
            lastBlockRoot = root
            lastUpdatePeriod2, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
            period2 = slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
        }

        require.Equal(t, 6, len(s.cache.items))

        // Add a non-canonical item to the cache
        cacheItem := &cacheItem{
            period: 0,
            slot:   33,
        }
        nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
        s.cache.items[nonCanonicalBlockRoot] = cacheItem

        require.Equal(t, 7, len(s.cache.items))

        for i := 4; i < 7; i++ {
            l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
            require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
            require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
            lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
        }

        require.Equal(t, 10, len(s.cache.items))

        err = s.MigrateToCold(ctx, lastBlockRoot)
        require.NoError(t, err)
        require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
        u, err := beaconDB.LightClientUpdate(ctx, period2)
        require.NoError(t, err)
        require.NotNil(t, u)
        require.DeepEqual(t, lastUpdatePeriod2, u)
        u, err = beaconDB.LightClientUpdate(ctx, period1)
        require.NoError(t, err)
        require.NotNil(t, u)
        require.DeepEqual(t, lastUpdatePeriod1, u)
    })
}
func saveInitialFinalizedCheckpointData(t *testing.T, ctx context.Context, beaconDB db.Database) ([32]byte, interfaces.SignedBeaconBlock) {
    genesis := util.NewBeaconBlock()
    genesisRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)
    genesisState, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveState(ctx, genesisState, genesisRoot))

    finalizedState, err := util.NewBeaconState()
    require.NoError(t, err)
    finalizedBlock := util.NewBeaconBlock()
    finalizedBlock.Block.Slot = 32
    finalizedBlock.Block.ParentRoot = genesisRoot[:]
    signedFinalizedBlock, err := blocks.NewSignedBeaconBlock(finalizedBlock)
    require.NoError(t, err)
    finalizedBlockHeader, err := signedFinalizedBlock.Header()
    require.NoError(t, err)
    require.NoError(t, finalizedState.SetLatestBlockHeader(finalizedBlockHeader.Header))
    finalizedStateRoot, err := finalizedState.HashTreeRoot(ctx)
    require.NoError(t, err)
    finalizedBlock.Block.StateRoot = finalizedStateRoot[:]
    signedFinalizedBlock, err = blocks.NewSignedBeaconBlock(finalizedBlock)
    require.NoError(t, err)
    finalizedBlockRoot, err := signedFinalizedBlock.Block().HashTreeRoot()
    require.NoError(t, err)
    cp := ethpb.Checkpoint{
        Epoch: 1,
        Root:  finalizedBlockRoot[:],
    }
    require.NoError(t, beaconDB.SaveBlock(ctx, signedFinalizedBlock))
    require.NoError(t, beaconDB.SaveState(ctx, finalizedState, finalizedBlockRoot))
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &cp))

    return finalizedBlockRoot, signedFinalizedBlock
}
func TestLightClientStore_LightClientUpdatesByRange(t *testing.T) {
|
||||
t.Run("no updates", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 2, 5, finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(updates))
|
||||
})
|
||||
|
||||
t.Run("single update from db", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 3, finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from db", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 5, update))
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 5, finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("single update from cache", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cacheItem := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItem
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{3})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 3, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from cache", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from both db and cache - no overlap", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, len(updates))
|
||||
for i := 0; i < 5; i++ {
|
||||
require.DeepEqual(t, update, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("multiple updates from both db and cache - overlap", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l1 := util.NewTestLightClient(t, version.Altair)
|
||||
update1, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
|
||||
update2, err := NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepNotEqual(t, update1, update2)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update1))
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update2,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update2,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update2,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, len(updates))
|
||||
// first two updates should be update1
|
||||
for i := 0; i < 2; i++ {
|
||||
require.DeepEqual(t, update1, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
// next three updates should be update2 - as cache overrides db
|
||||
for i := 2; i < 5; i++ {
|
||||
require.DeepEqual(t, update2, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("first continuous range", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l1 := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
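Taken together, the "overlap" and "first continuous range" subtests above pin down two rules for LightClientUpdates: a cached update overrides a database update for the same period, and collection stops at the first missing period. A minimal illustrative sketch of that merge behavior — not the store's actual implementation; the maps here are hypothetical stand-ins for the real DB and cache lookups:

package main

import "fmt"

// update is a hypothetical stand-in for a light client update.
type update struct{ period uint64 }

// mergeUpdates sketches the behavior the subtests assert: cache entries win
// on overlap, and iteration breaks at the first gap, so only the first
// continuous range of periods is returned.
func mergeUpdates(db, cache map[uint64]*update, start, end uint64) []*update {
    out := make([]*update, 0, end-start+1)
    for p := start; p <= end; p++ {
        u, ok := cache[p] // cache overrides db for the same period
        if !ok {
            u, ok = db[p]
        }
        if !ok {
            break // gap: stop at the first continuous range
        }
        out = append(out, u)
    }
    return out
}

func main() {
    db := map[uint64]*update{1: {1}, 2: {2}}
    cache := map[uint64]*update{4: {4}, 5: {5}}
    // Matches the "first continuous range" subtest: period 3 is missing,
    // so only the updates for periods 1 and 2 come back.
    fmt.Println(len(mergeUpdates(db, cache, 1, 5))) // 2
}
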
func saveStateAndBlockWithParentRoot(t *testing.T, ctx context.Context, d db.Database, parentRoot [32]byte) ([32]byte, interfaces.SignedBeaconBlock) {
    blk := util.NewBeaconBlock()
    blk.Block.ParentRoot = parentRoot[:]

    blkRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err)

    util.SaveBlock(t, ctx, d, blk)

    st, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, d.SaveState(ctx, st, blkRoot))

    signedFinalizedBlock, err := blocks.NewSignedBeaconBlock(blk)
    require.NoError(t, err)

    return blkRoot, signedFinalizedBlock
}

@@ -23,7 +23,6 @@ go_library(
        "//beacon-chain/builder:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
@@ -32,6 +31,7 @@ go_library(
        "//beacon-chain/execution:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/monitor:go_default_library",
        "//beacon-chain/node/registration:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",

@@ -25,7 +25,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
@@ -34,6 +33,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
    doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/monitor"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/node/registration"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"

@@ -253,10 +253,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
    // their initialization.
    beacon.finalizedStateAtStartUp = nil

    if features.Get().EnableLightClient {
        beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
    }

    return beacon, nil
}

@@ -349,6 +345,11 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
        return errors.Wrap(err, "could not register P2P service")
    }

    if features.Get().EnableLightClient {
        log.Debugln("Registering Light Client Store")
        beacon.registerLightClientStore()
    }

    log.Debugln("Registering Backfill Service")
    if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
        return errors.Wrap(err, "could not register Back Fill service")

@@ -621,35 +622,55 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
    return nil
}

func parseIPNetStrings(ipWhitelist []string) ([]*net.IPNet, error) {
    ipNets := make([]*net.IPNet, 0, len(ipWhitelist))
    for _, cidr := range ipWhitelist {
        _, ipNet, err := net.ParseCIDR(cidr)
        if err != nil {
            log.WithError(err).WithField("cidr", cidr).Error("Invalid CIDR in IP colocation whitelist")
            return nil, err
        }
        ipNets = append(ipNets, ipNet)
        log.WithField("cidr", cidr).Info("Added IP to colocation whitelist")
    }
    return ipNets, nil
}

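For reference, net.ParseCIDR from the standard library is what gives parseIPNetStrings its semantics: every entry must carry an explicit mask, and a bare IP is rejected rather than being assigned a default mask. A standalone sketch of that behavior:

package main

import (
    "fmt"
    "net"
)

func main() {
    // Entries need an explicit mask: /32 whitelists a single host,
    // /24 the whole subnet.
    _, subnet, err := net.ParseCIDR("192.168.1.0/24")
    if err != nil {
        panic(err)
    }
    fmt.Println(subnet.Contains(net.ParseIP("192.168.1.42"))) // true

    // A bare IP is rejected, which is why parseIPNetStrings returns an
    // error for such entries instead of guessing a mask.
    _, _, err = net.ParseCIDR("192.168.1.1")
    fmt.Println(err) // invalid CIDR address: 192.168.1.1
}
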
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
    bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
    if err != nil {
        return errors.Wrapf(err, "could not register p2p service")
    }

    colocationWhitelist, err := parseIPNetStrings(slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PColocationWhitelist.Name)))
    if err != nil {
        return fmt.Errorf("failed to register p2p service: %w", err)
    }

    svc, err := p2p.NewService(b.ctx, &p2p.Config{
        NoDiscovery:          cliCtx.Bool(cmd.NoDiscovery.Name),
        StaticPeers:          slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
        Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
        RelayNodeAddr:        cliCtx.String(cmd.RelayNode.Name),
        DataDir:              dataDir,
        DiscoveryDir:         filepath.Join(dataDir, "discovery"),
        LocalIP:              cliCtx.String(cmd.P2PIP.Name),
        HostAddress:          cliCtx.String(cmd.P2PHost.Name),
        HostDNS:              cliCtx.String(cmd.P2PHostDNS.Name),
        PrivateKey:           cliCtx.String(cmd.P2PPrivKey.Name),
        StaticPeerID:         cliCtx.Bool(cmd.P2PStaticID.Name),
        QUICPort:             cliCtx.Uint(cmd.P2PQUICPort.Name),
        TCPPort:              cliCtx.Uint(cmd.P2PTCPPort.Name),
        UDPPort:              cliCtx.Uint(cmd.P2PUDPPort.Name),
        MaxPeers:             cliCtx.Uint(cmd.P2PMaxPeers.Name),
        QueueSize:            cliCtx.Uint(cmd.PubsubQueueSize.Name),
        AllowListCIDR:        cliCtx.String(cmd.P2PAllowList.Name),
        DenyListCIDR:         slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
        EnableUPnP:           cliCtx.Bool(cmd.EnableUPnPFlag.Name),
        StateNotifier:        b,
        DB:                   b.db,
        ClockWaiter:          b.clockWaiter,
        NoDiscovery:           cliCtx.Bool(cmd.NoDiscovery.Name),
        StaticPeers:           slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
        Discv5BootStrapAddrs:  p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
        RelayNodeAddr:         cliCtx.String(cmd.RelayNode.Name),
        DataDir:               dataDir,
        DiscoveryDir:          filepath.Join(dataDir, "discovery"),
        LocalIP:               cliCtx.String(cmd.P2PIP.Name),
        HostAddress:           cliCtx.String(cmd.P2PHost.Name),
        HostDNS:               cliCtx.String(cmd.P2PHostDNS.Name),
        PrivateKey:            cliCtx.String(cmd.P2PPrivKey.Name),
        StaticPeerID:          cliCtx.Bool(cmd.P2PStaticID.Name),
        QUICPort:              cliCtx.Uint(cmd.P2PQUICPort.Name),
        TCPPort:               cliCtx.Uint(cmd.P2PTCPPort.Name),
        UDPPort:               cliCtx.Uint(cmd.P2PUDPPort.Name),
        MaxPeers:              cliCtx.Uint(cmd.P2PMaxPeers.Name),
        QueueSize:             cliCtx.Uint(cmd.PubsubQueueSize.Name),
        AllowListCIDR:         cliCtx.String(cmd.P2PAllowList.Name),
        DenyListCIDR:          slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
        IPColocationWhitelist: colocationWhitelist,
        EnableUPnP:            cliCtx.Bool(cmd.EnableUPnPFlag.Name),
        StateNotifier:         b,
        DB:                    b.db,
        ClockWaiter:           b.clockWaiter,
    })
    if err != nil {
        return err

@@ -941,6 +962,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
        FinalizationFetcher: chainService,
        BlockReceiver:       chainService,
        BlobReceiver:        chainService,
        DataColumnReceiver:  chainService,
        AttestationReceiver: chainService,
        GenesisTimeFetcher:  chainService,
        GenesisFetcher:      chainService,

@@ -1118,6 +1140,11 @@ func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.
    return b.services.RegisterService(bf)
}

func (b *BeaconNode) registerLightClientStore() {
    lcs := lightclient.NewLightClientStore(b.fetchP2P(), b.StateFeed(), b.db)
    b.lcStore = lcs
}

func hasNetworkFlag(cliCtx *cli.Context) bool {
    for _, flag := range features.NetworkFlags {
        for _, name := range flag.Names() {

@@ -74,7 +74,9 @@ func TestNodeStart_Ok(t *testing.T) {
    set := flag.NewFlagSet("test", 0)
    set.String("datadir", tmp, "node data directory")
    set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
    set.Bool("enable-light-client", true, "enable light client")
    require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
    require.NoError(t, set.Set("enable-light-client", "true"))

    ctx, cancel := newCliContextWithCancel(&app, set)

@@ -88,6 +90,7 @@ func TestNodeStart_Ok(t *testing.T) {

    node, err := New(ctx, cancel, options...)
    require.NoError(t, err)
    require.NotNil(t, node.lcStore)
    node.services = &runtime.ServiceRegistry{}
    go func() {
        node.Start()

@@ -262,3 +265,46 @@ func TestCORS(t *testing.T) {
        })
    }
}

func TestParseIPNetStrings(t *testing.T) {
    tests := []struct {
        name      string
        whitelist []string
        wantCount int
        wantError string
    }{
        {
            name:      "empty whitelist",
            whitelist: []string{},
            wantCount: 0,
        },
        {
            name:      "single IP whitelist",
            whitelist: []string{"192.168.1.1/32"},
            wantCount: 1,
        },
        {
            name:      "multiple IPs whitelist",
            whitelist: []string{"192.168.1.0/24", "10.0.0.0/8", "34.42.19.170/32"},
            wantCount: 3,
        },
        {
            name:      "invalid CIDR returns error",
            whitelist: []string{"192.168.1.0/24", "invalid-cidr", "10.0.0.0/8"},
            wantCount: 0,
            wantError: "invalid CIDR address",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result, err := parseIPNetStrings(tt.whitelist)
            assert.Equal(t, tt.wantCount, len(result))
            if len(tt.wantError) == 0 {
                assert.Equal(t, nil, err)
            } else {
                assert.ErrorContains(t, tt.wantError, err)
            }
        })
    }
}

@@ -60,6 +60,7 @@ go_library(
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
@@ -95,6 +96,7 @@ go_library(
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
        "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/net/connmgr:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
@@ -184,6 +186,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_libp2p_go_libp2p//:go_default_library",
        "@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
        "@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
        "@com_github_libp2p_go_libp2p//core/host:go_default_library",
        "@com_github_libp2p_go_libp2p//core/network:go_default_library",

@@ -11,6 +11,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/crypto/hash"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing"

@@ -308,19 +309,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumnSidecar(
    root [fieldparams.RootLength]byte,
    dataColumnSubnet uint64,
    dataColumnSidecar *ethpb.DataColumnSidecar,
    dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
    // Add tracing to the function.
    ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
    defer span.End()

    // Ensure the data column sidecar is not nil.
    if dataColumnSidecar == nil {
        return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", dataColumnSubnet)
    }

    // Retrieve the current fork digest.
    forkDigest, err := s.currentForkDigest()
    if err != nil {
@@ -330,16 +325,15 @@ func (s *Service) BroadcastDataColumnSidecar(
    }

    // Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
    go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
    go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)

    return nil
}

func (s *Service) internalBroadcastDataColumnSidecar(
    ctx context.Context,
    root [fieldparams.RootLength]byte,
    columnSubnet uint64,
    dataColumnSidecar *ethpb.DataColumnSidecar,
    dataColumnSidecar blocks.VerifiedRODataColumn,
    forkDigest [fieldparams.VersionLength]byte,
) {
    // Add tracing to the function.
@@ -385,8 +379,9 @@ func (s *Service) internalBroadcastDataColumnSidecar(
    log.WithFields(logrus.Fields{
        "slot":               slot,
        "timeSinceSlotStart": time.Since(slotStartTime),
        "root":               fmt.Sprintf("%#x", root),
        "root":               fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
        "columnSubnet":       columnSubnet,
        "blobCount":          len(dataColumnSidecar.Column),
    }).Debug("Broadcasted data column sidecar")

    // Increase the number of successful broadcasts.

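The broadcaster now takes a blocks.VerifiedRODataColumn instead of a raw *ethpb.DataColumnSidecar plus a separately threaded root: the root travels with the already-verified sidecar, so both the nil check and the root parameter disappear. A rough sketch of that "verified read-only wrapper" pattern — the names and fields below are hypothetical, and the real blocks.VerifiedRODataColumn differs in detail:

package columns

import "errors"

// DataColumnSidecar is a stand-in for the real ethpb type.
type DataColumnSidecar struct {
    Column [][]byte
}

// roDataColumn bundles a sidecar with the block root it was verified against.
type roDataColumn struct {
    sidecar   *DataColumnSidecar
    blockRoot [32]byte
}

// newRODataColumn validates once at construction time, so downstream code
// such as a broadcaster no longer needs its own nil check or root argument.
func newRODataColumn(sc *DataColumnSidecar, root [32]byte) (roDataColumn, error) {
    if sc == nil {
        return roDataColumn{}, errors.New("nil data column sidecar")
    }
    return roDataColumn{sidecar: sc, blockRoot: root}, nil
}

// BlockRoot travels with the sidecar instead of being passed through every call.
func (c roDataColumn) BlockRoot() [32]byte { return c.blockRoot }
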
@@ -711,13 +711,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
    subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
    topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()

    roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
    sidecar := roSidecars[0].DataColumnSidecar

    // Attempt to broadcast nil object should fail.
    var emptyRoot [fieldparams.RootLength]byte
    err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
    require.ErrorContains(t, "attempted to broadcast nil", err)
    _, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
    verifiedRoSidecar := verifiedRoSidecars[0]

    // Subscribe to the topic.
    sub, err := p2.SubscribeToTopic(topic)
@@ -727,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
    time.Sleep(50 * time.Millisecond)

    // Broadcast to peers and wait.
    err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
    err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
    require.NoError(t, err)

    // Receive the message.
@@ -739,5 +734,5 @@ func TestService_BroadcastDataColumn(t *testing.T) {

    var result ethpb.DataColumnSidecar
    require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
    require.DeepEqual(t, &result, sidecar)
    require.DeepEqual(t, &result, verifiedRoSidecar)
}

@@ -1,6 +1,7 @@
package p2p

import (
    "net"
    "time"

    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
@@ -10,34 +11,56 @@ import (

// This is the default queue size used if we have specified an invalid one.
const defaultPubsubQueueSize = 600
const (
    // defaultConnManagerPruneAbove sets the number of peers where ConnectionManager
    // will begin to internally prune peers. This value is set based on the internal
    // value of the libp2p DefaultConnectionManager "high water mark". The "low water mark"
    // is the number of peers where ConnManager will stop pruning. This value is computed
    // by subtracting connManagerPruneAmount from the high water mark.
    defaultConnManagerPruneAbove = 192
    connManagerPruneAmount       = 32
)

// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
    NoDiscovery          bool
    EnableUPnP           bool
    StaticPeerID         bool
    DisableLivenessCheck bool
    StaticPeers          []string
    Discv5BootStrapAddrs []string
    RelayNodeAddr        string
    LocalIP              string
    HostAddress          string
    HostDNS              string
    PrivateKey           string
    DataDir              string
    DiscoveryDir         string
    QUICPort             uint
    TCPPort              uint
    UDPPort              uint
    PingInterval         time.Duration
    MaxPeers             uint
    QueueSize            uint
    AllowListCIDR        string
    DenyListCIDR         []string
    StateNotifier        statefeed.Notifier
    DB                   db.ReadOnlyDatabaseWithSeqNum
    ClockWaiter          startup.ClockWaiter
    NoDiscovery           bool
    EnableUPnP            bool
    StaticPeerID          bool
    DisableLivenessCheck  bool
    StaticPeers           []string
    Discv5BootStrapAddrs  []string
    RelayNodeAddr         string
    LocalIP               string
    HostAddress           string
    HostDNS               string
    PrivateKey            string
    DataDir               string
    DiscoveryDir          string
    QUICPort              uint
    TCPPort               uint
    UDPPort               uint
    PingInterval          time.Duration
    MaxPeers              uint
    QueueSize             uint
    AllowListCIDR         string
    DenyListCIDR          []string
    IPColocationWhitelist []*net.IPNet
    StateNotifier         statefeed.Notifier
    DB                    db.ReadOnlyDatabaseWithSeqNum
    ClockWaiter           startup.ClockWaiter
}

// connManagerLowHigh picks the low and high water marks for the connection manager based
// on the MaxPeers setting. The high water mark will be at least the default high water mark
// (192), or MaxPeers + 32, whichever is higher. The low water mark is set to be 32 less than
// the high water mark. This is done to ensure the ConnManager never prunes peers that the
// node has connected to based on the MaxPeers setting.
func (cfg *Config) connManagerLowHigh() (int, int) {
    maxPeersPlusMargin := int(cfg.MaxPeers) + connManagerPruneAmount
    high := max(maxPeersPlusMargin, defaultConnManagerPruneAbove)
    low := high - connManagerPruneAmount
    return low, high
}
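Under that rule the MaxPeers margin only starts to matter once it pushes past the 192 default floor. A quick standalone check of the arithmetic, using the two constants shown above and the max builtin (Go 1.21+) that the diffed code itself relies on:

package main

import "fmt"

const (
    defaultConnManagerPruneAbove = 192
    connManagerPruneAmount       = 32
)

// lowHigh mirrors the computation in connManagerLowHigh above.
func lowHigh(maxPeers int) (int, int) {
    high := max(maxPeers+connManagerPruneAmount, defaultConnManagerPruneAbove)
    return high - connManagerPruneAmount, high
}

func main() {
    for _, mp := range []int{70, 160, 500} {
        low, high := lowHigh(mp)
        // 70  -> low 160, high 192 (default floor applies)
        // 160 -> low 160, high 192 (boundary case: 160+32 == 192)
        // 500 -> low 500, high 532 (MaxPeers + margin wins)
        fmt.Println(mp, low, high)
    }
}
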

// validateConfig validates whether the values provided are accurate and will set

@@ -10,6 +10,8 @@ import (
    "github.com/sirupsen/logrus"
)

var errNoCustodyInfo = errors.New("no custody info available")

var _ CustodyManager = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
@@ -30,7 +32,7 @@ func (s *Service) CustodyGroupCount() (uint64, error) {
    defer s.custodyInfoLock.Unlock()

    if s.custodyInfo == nil {
        return 0, errors.New("no custody info available")
        return 0, errNoCustodyInfo
    }

    return s.custodyInfo.groupCount, nil
@@ -155,6 +157,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
    log := log.WithFields(logrus.Fields{
        "peerID":       pid,
        "defaultValue": custodyRequirement,
        "agent":        agentString(pid, s.Host()),
    })

    // Retrieve the ENR of the peer.

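Hoisting the inline errors.New into the package-level errNoCustodyInfo sentinel is what lets callers branch with errors.Is, as the discovery retry loop later in this diff does. A minimal sketch of that standard-library pattern, with a made-up readiness flag standing in for the real custody state:

package main

import (
    "errors"
    "fmt"
)

// Package-level sentinel: a single value every caller can compare against.
var errNoCustodyInfo = errors.New("no custody info available")

func custodyGroupCount(ready bool) (uint64, error) {
    if !ready {
        // Wrapping with %w keeps context while errors.Is still matches the sentinel.
        return 0, fmt.Errorf("custody group count: %w", errNoCustodyInfo)
    }
    return 4, nil
}

func main() {
    if _, err := custodyGroupCount(false); errors.Is(err, errNoCustodyInfo) {
        fmt.Println("not ready yet, retry later:", err)
    }
}
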
@@ -8,6 +8,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
    testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -269,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
    service := &Service{
        peers:    peers,
        metaData: tc.metadata,
        host:     testp2p.NewTestP2P(t).Host(),
    }

    // Retrieve the custody count from the remote peer.
@@ -329,6 +331,7 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {

    service := &Service{
        peers: peers,
        host:  testp2p.NewTestP2P(t).Host(),
    }

    actual := service.custodyGroupCountFromPeerENR(pid)

@@ -79,7 +79,7 @@ func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
    rawListener, err := listenerCreator()
    if err != nil {
        return nil, errors.Wrap(err, "could not create new listener")
        return nil, errors.Wrap(err, "create new listener")
    }
    return &listenerWrapper{
        listener: rawListener,
@@ -536,7 +536,7 @@ func (s *Service) createListener(
        int(s.cfg.QUICPort),
    )
    if err != nil {
        return nil, errors.Wrap(err, "could not create local node")
        return nil, errors.Wrap(err, "create local node")
    }

    bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
@@ -604,13 +604,27 @@ func (s *Service) createLocalNode(
    localNode = initializeSyncCommSubnets(localNode)

    if params.FuluEnabled() {
        custodyGroupCount, err := s.CustodyGroupCount()
        if err != nil {
            return nil, errors.Wrap(err, "could not retrieve custody group count")
        }
        // TODO: Replace this quick fix with a proper synchronization scheme (chan?)
        const delay = 1 * time.Second

        custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
        localNode.Set(custodyGroupCountEntry)
        var custodyGroupCount uint64

        err := errNoCustodyInfo
        for errors.Is(err, errNoCustodyInfo) {
            custodyGroupCount, err = s.CustodyGroupCount()
            if errors.Is(err, errNoCustodyInfo) {
                log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
                time.Sleep(delay)
                continue
            }

            if err != nil {
                return nil, errors.Wrap(err, "retrieve custody group count")
            }

            custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
            localNode.Set(custodyGroupCountEntry)
        }
    }

    if s.cfg != nil && s.cfg.HostAddress != "" {
@@ -652,7 +666,7 @@ func (s *Service) startDiscoveryV5(
    }
    wrappedListener, err := newListener(createListener)
    if err != nil {
        return nil, errors.Wrap(err, "could not create listener")
        return nil, errors.Wrap(err, "create listener")
    }
    record := wrappedListener.Self()

@@ -684,7 +698,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {

    peerData, multiAddrs, err := convertToAddrInfo(node)
    if err != nil {
        log.WithError(err).Debug("Could not convert to peer data")
        log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
        return false
    }

@@ -851,7 +865,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
    multiAddrs, err := retrieveMultiAddrsFromNode(node)
    if err != nil {
        return nil, nil, err
        return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
    }

    if len(multiAddrs) == 0 {

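The TODO in createLocalNode above flags the one-second polling loop as a stopgap and hints at a channel. One hedged sketch of what that could look like: a signal that the custody-info writer closes exactly once, so readers block instead of sleeping. None of these names exist in the diff; this is only an illustration of the pattern the TODO gestures at.

package custody

import (
    "context"
    "sync"
)

// custodySignal: set() records the count and closes `ready` exactly once;
// wait() blocks on the channel instead of polling with time.Sleep.
type custodySignal struct {
    ready chan struct{}
    once  sync.Once
    count uint64
}

func newCustodySignal() *custodySignal {
    return &custodySignal{ready: make(chan struct{})}
}

// set is safe to call more than once; only the first call wins.
func (c *custodySignal) set(count uint64) {
    c.once.Do(func() {
        c.count = count
        close(c.ready)
    })
}

// wait blocks until set has been called or ctx expires. Reading c.count
// after <-c.ready is safe: the write happens before close(c.ready).
func (c *custodySignal) wait(ctx context.Context) (uint64, error) {
    select {
    case <-c.ready:
        return c.count, nil
    case <-ctx.Done():
        return 0, ctx.Err()
    }
}
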
@@ -3,6 +3,7 @@ package p2p
import (
    "context"
    "math"
    "net"
    "reflect"
    "strings"
    "time"
@@ -75,7 +76,7 @@ var (
    tenEpochs = 10 * oneEpochDuration()
)

func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
func peerScoringParams(colocationWhitelist []*net.IPNet) (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
    thresholds := &pubsub.PeerScoreThresholds{
        GossipThreshold:             -4000,
        PublishThreshold:            -8000,
@@ -83,6 +84,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
        AcceptPXThreshold:           100,
        OpportunisticGraftThreshold: 5,
    }

    scoreParams := &pubsub.PeerScoreParams{
        Topics:        make(map[string]*pubsub.TopicScoreParams),
        TopicScoreCap: 32.72,
@@ -92,7 +94,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
        AppSpecificWeight:           1,
        IPColocationFactorWeight:    -35.11,
        IPColocationFactorThreshold: 10,
        IPColocationFactorWhitelist: nil,
        IPColocationFactorWhitelist: colocationWhitelist,
        BehaviourPenaltyWeight:      -15.92,
        BehaviourPenaltyThreshold:   6,
        BehaviourPenaltyDecay:       scoreDecay(tenEpochs),

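Tracing the whole feature end to end: the comma-separated CLI value behind cmd.P2PColocationWhitelist is parsed by parseIPNetStrings, carried on Config.IPColocationWhitelist, and handed to peerScoringParams, which plugs it into gossipsub's IPColocationFactorWhitelist so peers in whitelisted networks escape the colocation penalty. A condensed sketch of that chain — the flag value below is made up for illustration:

package main

import (
    "fmt"
    "net"
)

// Wiring added by this diff, condensed:
// CLI flag -> parseIPNetStrings -> Config.IPColocationWhitelist
//          -> peerScoringParams(colocationWhitelist)
//          -> pubsub.PeerScoreParams.IPColocationFactorWhitelist
func main() {
    flagValues := []string{"10.0.0.0/8", "34.42.19.170/32"} // stand-in for the CLI flag

    whitelist := make([]*net.IPNet, 0, len(flagValues))
    for _, cidr := range flagValues {
        _, ipNet, err := net.ParseCIDR(cidr)
        if err != nil {
            panic(err)
        }
        whitelist = append(whitelist, ipNet)
    }

    // Peers whose IPs fall inside a whitelisted network are exempt from the
    // IPColocationFactorWeight penalty that otherwise punishes many peers
    // sharing one IP (e.g. hosts behind the same NAT or datacenter prefix).
    fmt.Println("whitelisted networks:", whitelist)
}
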