Compare commits


2 Commits

Author         SHA1        Message                                                    Date
terence tsao   5f781a7df6  Turn gossip param                                          2025-08-02 20:46:56 -07:00
terence tsao   929bbb8209  Implement EIP7805: Fork-choice enforced Inclusion Lists   2025-07-19 14:54:56 -07:00
857 changed files with 29585 additions and 55941 deletions

View File

@@ -1,6 +1,6 @@
name: 🐞 Bug report
description: Report a bug or problem with running Prysm
type: "Bug"
labels: ["Bug"]
body:
- type: markdown
attributes:

View File

@@ -1,4 +1,4 @@
FROM golang:1.25.1-alpine
FROM golang:1.24-alpine
COPY entrypoint.sh /entrypoint.sh

View File

@@ -1,43 +0,0 @@
name: Check Spec References
on: [push, pull_request]
jobs:
check-specrefs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
fi
- name: Install ethspecify
run: python3 -mpip install ethspecify
- name: Update spec references
run: ethspecify process --path=specrefs
- name: Check for differences
run: |
if ! git diff --exit-code specrefs >/dev/null; then
echo "Spec references are out-of-date!"
echo ""
git --no-pager diff specrefs
exit 1
else
echo "Spec references are up-to-date!"
fi
- name: Check spec references
run: ethspecify check --path=specrefs
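Note on the deleted workflow above: it greps two files and fails the job when the pinned consensus-spec versions drift apart. A rough Go equivalent of that consistency check, as a hedged sketch: it assumes the same file locations (WORKSPACE, specrefs/.ethspecify.yml) and the same line formats the shell version matches on.

package main

import (
	"fmt"
	"os"
	"regexp"
)

// mustFind returns the first capture group of pattern in the file at path.
func mustFind(path, pattern string) string {
	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	m := regexp.MustCompile(pattern).FindSubmatch(data)
	if m == nil {
		panic("pattern not found in " + path)
	}
	return string(m[1])
}

func main() {
	ws := mustFind("WORKSPACE", `consensus_spec_version = "(.*)"`)
	es := mustFind("specrefs/.ethspecify.yml", `(?m)^version: (.*)$`)
	if ws != es {
		fmt.Printf("Version mismatch between WORKSPACE (%s) and ethspecify (%s)\n", ws, es)
		os.Exit(1)
	}
	fmt.Println("Versions match:", ws)
}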

View File

@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.25.1'
go-version: '1.23.5'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.25.1'
go-version: '1.23.5'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}

View File

@@ -31,7 +31,7 @@ jobs:
- name: Set up Go 1.24
uses: actions/setup-go@v4
with:
go-version: '1.25.1'
go-version: '1.24.0'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
@@ -44,27 +44,27 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go 1.25.1
uses: actions/setup-go@v5
- name: Set up Go 1.24
uses: actions/setup-go@v4
with:
go-version: '1.25.1'
go-version: '1.24.0'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v5
with:
version: v2.4
version: v1.64.5
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.25.1
- name: Set up Go 1.x
uses: actions/setup-go@v4
with:
go-version: '1.25.1'
go-version: '1.24.0'
id: go
- name: Check out code into the Go module directory

View File

@@ -1,41 +1,90 @@
version: "2"
run:
go: 1.23.5
linters:
enable:
- errcheck
- ineffassign
- govet
disable:
- staticcheck
- unused
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
timeout: 10m
go: '1.23.5'
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
issues:
exclude-files:
- validator/web/site_data.go
- .*_test.go
exclude-dirs:
- proto
- tools/analyzers
linters:
enable-all: true
disable:
# Deprecated linters:
- govet
# Disabled for now:
- asasalint
- bodyclose
- containedctx
- contextcheck
- cyclop
- depguard
- dogsled
- dupl
- durationcheck
- errname
- err113
- exhaustive
- exhaustruct
- forbidigo
- forcetypeassert
- funlen
- gci
- gochecknoglobals
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- godox
- gofumpt
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign
- tagliatelle
- thelper
- unparam
- usetesting
- varnamelen
- wrapcheck
- wsl
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
output:
print-issued-lines: true
sort-results: true

View File

@@ -2993,7 +2993,7 @@ There are two known issues with this release:
### Added
- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
details.
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for

View File

@@ -2,7 +2,7 @@
Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
"go build" project.

View File

@@ -158,15 +158,15 @@ oci_register_toolchains(
http_archive(
name = "io_bazel_rules_go",
integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
patch_args = ["-p1"],
patches = [
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "a729c8ed2447c90fe140077689079ca0acfb7580ec41637f312d650ce9d93d96",
strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
urls = [
"https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
"https://github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
"https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
],
)
@@ -190,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
# A multi-arch base image
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
platforms = [
"linux/amd64",
@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.25.1",
go_version = "1.24.0",
nogo = "@//:nogo",
)
@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-beta.0"
consensus_spec_version = "v1.6.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
"minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
"mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
"general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
"minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
"mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
},
version = consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -300,6 +300,22 @@ filegroup(
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
)
http_archive(
name = "eth2_networks",
build_file_content = """
filegroup(
name = "configs",
srcs = glob([
"shared/**/config.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """
@@ -311,9 +327,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz", # 2025-03-17
)
http_archive(
@@ -343,9 +359,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz", # 2025-03-17
)
http_archive(
@@ -359,17 +375,17 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
)
http_archive(
name = "com_google_protobuf",
sha256 = "7c3ebd7aaedd86fa5dc479a0fda803f602caaf78d8aff7ce83b89e1b8ae7442a",
strip_prefix = "protobuf-28.3",
sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
strip_prefix = "protobuf-25.1",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v28.3.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz",
],
)

View File

@@ -2,28 +2,18 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"common.go",
"header.go",
],
srcs = ["common.go"],
importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/primitives:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
deps = ["//consensus-types/primitives:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = [
"common_test.go",
"header_test.go",
],
srcs = ["common_test.go"],
embed = [":go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,122 +0,0 @@
package apiutil
import (
"mime"
"sort"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
)
type mediaRange struct {
mt string // canonicalised mediatype, e.g. "application/json"
q float64 // quality factor (0-1)
raw string // original string, useful for logging/debugging
spec int // 2=exact, 1=type/*, 0=*/*
}
func parseMediaRange(field string) (mediaRange, bool) {
field = strings.TrimSpace(field)
mt, params, err := mime.ParseMediaType(field)
if err != nil {
log.WithError(err).Debug("Failed to parse header field")
return mediaRange{}, false
}
r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}
if qs, ok := params["q"]; ok {
v, err := strconv.ParseFloat(qs, 64)
if err != nil || v < 0 || v > 1 {
log.WithField("q", qs).Debug("Invalid quality factor (01)")
return mediaRange{}, false // skip invalid entry
}
r.q = v
}
switch {
case mt == "*/*":
r.spec = 0
case strings.HasSuffix(mt, "/*"):
r.spec = 1
}
return r, true
}
func hasExplicitQ(r mediaRange) bool {
return strings.Contains(strings.ToLower(r.raw), ";q=")
}
// ParseAccept returns media ranges sorted by q (desc) then specificity.
func ParseAccept(header string) []mediaRange {
if header == "" {
return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
}
var out []mediaRange
for _, field := range strings.Split(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}
}
sort.SliceStable(out, func(i, j int) bool {
ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
if ei != ej {
return ei // explicit beats implicit
}
if out[i].q != out[j].q {
return out[i].q > out[j].q
}
return out[i].spec > out[j].spec
})
return out
}
// Matches reports whether content type is acceptable per the header.
func Matches(header, ct string) bool {
for _, r := range ParseAccept(header) {
switch {
case r.q == 0:
continue
case r.mt == "*/*":
return true
case strings.HasSuffix(r.mt, "/*"):
if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
return true
}
case r.mt == ct:
return true
}
}
return false
}
// Negotiate selects the best server type according to the header.
// Returns the chosen type and true, or "", false when nothing matches.
func Negotiate(header string, serverTypes []string) (string, bool) {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue
}
for _, s := range serverTypes {
if Matches(r.mt, s) {
return s, true
}
}
}
return "", false
}
// PrimaryAcceptMatches only checks if the first accept matches
func PrimaryAcceptMatches(header, produced string) bool {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue // explicitly unacceptable, skip
}
return Matches(r.mt, produced)
}
return false
}
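Taken together, the removed helpers implement Accept-header content negotiation: ParseAccept orders the ranges, Matches tests one produced type, and Negotiate returns the first acceptable server type. A minimal sketch of how they compose, assuming the package remains importable at the api/apiutil path shown in the BUILD file above:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

func main() {
	// Explicit q beats implicit, higher q beats lower, so the octet-stream
	// range (q=0.9) is tried before json (q=0.8) and the */* catch-all (q=0.1).
	header := "application/json;q=0.8, application/octet-stream;q=0.9, */*;q=0.1"

	if best, ok := apiutil.Negotiate(header, []string{"application/json", "application/octet-stream"}); ok {
		fmt.Println(best) // application/octet-stream
	}

	// Matches answers the narrower question for a single content type.
	fmt.Println(apiutil.Matches(header, "image/png")) // true, via the */* range
}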

View File

@@ -1,174 +0,0 @@
package apiutil
import (
"testing"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestParseAccept(t *testing.T) {
type want struct {
mt string
q float64
spec int
}
cases := []struct {
name string
header string
want []want
}{
{
name: "empty header becomes */*;q=1",
header: "",
want: []want{{mt: "*/*", q: 1, spec: 0}},
},
{
name: "quality ordering then specificity",
header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
want: []want{
{mt: "application/xml", q: 0.5, spec: 2},
{mt: "text/*", q: 0.5, spec: 1},
{mt: "application/json", q: 0.2, spec: 2},
{mt: "*/*", q: 0.1, spec: 0},
},
},
{
name: "invalid pieces are skipped",
header: "text/plain; q=boom, application/json",
want: []want{{mt: "application/json", q: 1, spec: 2}},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := ParseAccept(tc.header)
gotProjected := make([]want, len(got))
for i, g := range got {
gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
}
require.DeepEqual(t, gotProjected, tc.want)
})
}
}
func TestMatches(t *testing.T) {
cases := []struct {
name string
accept string
ct string
matches bool
}{
{"exact match", "application/json", "application/json", true},
{"type wildcard", "application/*;q=0.8", "application/xml", true},
{"global wildcard", "*/*;q=0.1", "image/png", true},
{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
{"no match", "image/png", "application/json", false},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := Matches(tc.accept, tc.ct)
require.Equal(t, tc.matches, got)
})
}
}
func TestNegotiate(t *testing.T) {
cases := []struct {
name string
accept string
serverTypes []string
wantType string
ok bool
}{
{
name: "highest quality wins",
accept: "application/json;q=0.8,application/xml;q=0.9",
serverTypes: []string{"application/json", "application/xml"},
wantType: "application/xml",
ok: true,
},
{
name: "wildcard matches first server type",
accept: "*/*;q=0.5",
serverTypes: []string{"application/octet-stream", "application/json"},
wantType: "application/octet-stream",
ok: true,
},
{
name: "no acceptable type",
accept: "image/png",
serverTypes: []string{"application/json"},
wantType: "",
ok: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got, ok := Negotiate(tc.accept, tc.serverTypes)
require.Equal(t, tc.ok, ok)
require.Equal(t, tc.wantType, got)
})
}
}
func TestPrimaryAcceptMatches(t *testing.T) {
tests := []struct {
name string
accept string
produced string
expect bool
}{
{
name: "prefers json",
accept: "application/json;q=0.9,application/xml",
produced: "application/json",
expect: true,
},
{
name: "wildcard application beats other wildcard",
accept: "application/*;q=0.2,*/*;q=0.1",
produced: "application/xml",
expect: true,
},
{
name: "json wins",
accept: "application/xml;q=0.8,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json loses",
accept: "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
produced: "application/json",
expect: false,
},
{
name: "json wins with non q option",
accept: "application/xml;q=0.8,image/png,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json not primary",
accept: "image/png,application/json",
produced: "application/json",
expect: false,
},
{
name: "absent header",
accept: "",
produced: "text/plain",
expect: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := PrimaryAcceptMatches(tc.accept, tc.produced)
require.Equal(t, got, tc.expect)
})
}
}

View File

@@ -16,6 +16,7 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -16,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -135,6 +137,24 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -314,3 +334,31 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []structs.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}
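The decode path above insists on 4-byte fork versions. A self-contained sketch of the same steps (decimal-string epoch, 0x-prefixed hex version), using illustrative Altair mainnet values as an assumption:

package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	epochStr, versionStr := "74240", "0x01000000" // Altair on mainnet

	epoch, err := strconv.ParseUint(epochStr, 10, 64)
	if err != nil {
		panic(err)
	}
	vSlice, err := hexutil.Decode(versionStr)
	if err != nil {
		panic(err)
	}
	if len(vSlice) != 4 { // mirrors the 4-byte check in OrderedForkSchedule
		panic(fmt.Sprintf("got %d byte version, expected 4 bytes", len(vSlice)))
	}
	fmt.Printf("epoch=%d version=%#x\n", epoch, vSlice)
}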

View File

@@ -30,11 +30,10 @@ import (
)
const (
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
getStatus = "/eth/v1/builder/status"
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
postBlindedBeaconBlockV2Path = "/eth/v2/builder/blinded_blocks"
postRegisterValidatorPath = "/eth/v1/builder/validators"
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
getStatus = "/eth/v1/builder/status"
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
postRegisterValidatorPath = "/eth/v1/builder/validators"
)
var (
@@ -103,7 +102,6 @@ type BuilderClient interface {
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
Status(ctx context.Context) error
}
@@ -154,8 +152,7 @@ func (c *Client) NodeURL() string {
type reqOption func(*http.Request)
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
// It validates that the HTTP response status matches the expectedStatus parameter.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
@@ -190,8 +187,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != expectedStatus {
err = unexpectedStatusErr(r, expectedStatus)
if r.StatusCode != http.StatusOK {
err = non200Err(r)
return
}
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
@@ -239,7 +236,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
r.Header.Set("Accept", api.JsonMediaType)
}
}
data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
if err != nil {
return nil, errors.Wrap(err, "error getting header from builder server")
}
@@ -412,7 +409,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
}
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
return errors.Wrap(err, "do")
}
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -474,7 +471,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
// blobs bundle if it is post deneb.
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
}
@@ -504,24 +501,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return ed, blobs, nil
}
// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
body, postOpts, err := c.buildBlindedBlockRequest(sb)
if err != nil {
return err
}
// Post the blinded block - the response should only contain a status code (no payload)
_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockV2Path, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
if err != nil {
return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
}
// Success is indicated by no error (status 202)
return nil
}
func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
var versionHeader string
if c.sszEnabled {
@@ -678,11 +657,11 @@ func (c *Client) Status(ctx context.Context) error {
getOpts := func(r *http.Request) {
r.Header.Set("Accept", api.JsonMediaType)
}
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
return err
}
func unexpectedStatusErr(response *http.Response, expected int) error {
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
var errMessage ErrorMessage
var body string
@@ -691,7 +670,7 @@ func unexpectedStatusErr(response *http.Response, expected int) error {
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusUnsupportedMediaType:
log.WithError(ErrUnsupportedMediaType).Debug(msg)

View File

@@ -1555,89 +1555,6 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
}
}
func TestSubmitBlindedBlockPostFulu(t *testing.T) {
ctx := t.Context()
t.Run("success", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
Body: io.NopCloser(bytes.NewBufferString("")),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.NoError(t, err)
})
t.Run("success_ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
Body: io.NopCloser(bytes.NewBufferString("")),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
sszEnabled: true,
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.NoError(t, err)
})
t.Run("error_response", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
message := ErrorMessage{
Code: 400,
Message: "Bad Request",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusBadRequest,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.ErrorIs(t, err, ErrNotOK)
})
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)
@@ -1810,7 +1727,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return BlobsBundler interface
client := &Client{}
// Verify the method exists with the correct signature

View File

@@ -45,11 +45,6 @@ func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySig
return nil, nil, nil
}
// SubmitBlindedBlockPostFulu --
func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return nil
}
// Status --
func (MockClient) Status(_ context.Context) error {
return nil

View File

@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}
func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
func TestErrorMessage_non200Err(t *testing.T) {
mockRequest := &http.Request{
URL: &url.URL{Path: "example.com"},
}
@@ -1779,7 +1779,7 @@ func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := unexpectedStatusErr(tt.args, http.StatusOK)
err := non200Err(tt.args)
if err != nil && tt.wantMessage != "" {
require.ErrorContains(t, tt.wantMessage, err)
}

View File

@@ -1,9 +1,10 @@
package httprest
import (
"net/http"
"time"
"net/http"
"github.com/OffchainLabs/prysm/v6/api/server/middleware"
)

View File

@@ -10,7 +10,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -7,7 +7,6 @@ import (
"strings"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/apiutil"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)
@@ -75,10 +74,42 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
acceptHeader := r.Header.Get("Accept")
// header is optional and should skip if not provided
if acceptHeader == "" {
next.ServeHTTP(w, r)
return
}
accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
}
if accepted {
break
}
}
if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
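The replacement matcher ignores q-values entirely and matches by prefix, per the RFC 2616 comment above. A quick probe of its behavior, assuming Middleware is the usual func(http.Handler) http.Handler and a server list of just application/json:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/OffchainLabs/prysm/v6/api/server/middleware"
)

func main() {
	h := middleware.AcceptHeaderHandler([]string{"application/json"})(
		http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusOK)
		}))

	// Expected: "" 200 (header optional), "*/*" 200, "application/*" 200,
	// "application/json;q=0" 200 (q is not parsed), "image/png" 406.
	for _, accept := range []string{"", "*/*", "application/*", "application/json;q=0", "image/png"} {
		req := httptest.NewRequest(http.MethodGet, "/", nil)
		if accept != "" {
			req.Header.Set("Accept", accept)
		}
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)
		fmt.Printf("%-22q -> %d\n", accept, rec.Code)
	}
}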

View File

@@ -36,7 +36,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -134,6 +133,66 @@ func SignedBLSChangesFromConsensus(src []*eth.SignedBLSToExecutionChange) []*Sig
return changes
}
func SignedInclusionListFromConsensus(src *eth.SignedInclusionList) *SignedInclusionList {
transactions := make([]string, len(src.Message.Transactions))
for i, transaction := range src.Message.Transactions {
transactions[i] = hexutil.Encode(transaction)
}
return &SignedInclusionList{
Message: &InclusionList{
Slot: fmt.Sprintf("%d", src.Message.Slot),
ValidatorIndex: fmt.Sprintf("%d", src.Message.ValidatorIndex),
InclusionListCommitteeRoot: hexutil.Encode(src.Message.InclusionListCommitteeRoot),
Transactions: transactions,
},
Signature: hexutil.Encode(src.Signature),
}
}
func (s *SignedInclusionList) ToConsensus() (*eth.SignedInclusionList, error) {
message, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
signature, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedInclusionList{
Message: message,
Signature: signature,
}, nil
}
func (s *InclusionList) ToConsensus() (*eth.InclusionList, error) {
slot, err := strconv.ParseUint(s.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
validatorIndex, err := strconv.ParseUint(s.ValidatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorIndex")
}
inclusionListCommitteeRoot, err := bytesutil.DecodeHexWithLength(s.InclusionListCommitteeRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "InclusionListCommitteeRoot")
}
transactions := make([][]byte, len(s.Transactions))
for i, transaction := range s.Transactions {
transactions[i], err = bytesutil.DecodeHexWithMaxLength(transaction, fieldparams.MaxBytesPerTxLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Transactions[%d]", i))
}
}
return &eth.InclusionList{
Slot: primitives.Slot(slot),
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
InclusionListCommitteeRoot: inclusionListCommitteeRoot,
Transactions: transactions,
}, nil
}
func (s *Fork) ToConsensus() (*eth.Fork, error) {
previousVersion, err := bytesutil.DecodeHexWithLength(s.PreviousVersion, 4)
if err != nil {
@@ -700,11 +759,6 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
// Add validation to check if the signature is valid BLS format
_, err = bls.SignatureFromBytes(sig)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SyncCommitteeMessage{
Slot: primitives.Slot(slot),

View File

@@ -283,16 +283,3 @@ type GetPendingPartialWithdrawalsResponse struct {
Finalized bool `json:"finalized"`
Data []*PendingPartialWithdrawal `json:"data"`
}
type GetProposerLookaheadResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []string `json:"data"` // validator indexes
}
type GetBlobsResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []string `json:"data"` //blobs
}

View File

@@ -56,19 +56,3 @@ type ForkChoiceNodeExtraData struct {
TimeStamp string `json:"timestamp"`
Target string `json:"target"`
}
type GetDebugDataColumnSidecarsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*DataColumnSidecar `json:"data"`
}
type DataColumnSidecar struct {
Index string `json:"index"`
Column []string `json:"column"`
KzgCommitments []string `json:"kzg_commitments"`
KzgProofs []string `json:"kzg_proofs"`
SignedBeaconBlockHeader *SignedBeaconBlockHeader `json:"signed_block_header"`
KzgCommitmentsInclusionProof []string `json:"kzg_commitments_inclusion_proof"`
}

View File

@@ -103,6 +103,11 @@ type BlobSidecarEvent struct {
VersionedHash string `json:"versioned_hash"`
}
type InclusionListEvent struct {
Version string `json:"version"`
Data *SignedInclusionList `json:"data"`
}
type LightClientFinalityUpdateEvent struct {
Version string `json:"version"`
Data *LightClientFinalityUpdate `json:"data"`

View File

@@ -27,8 +27,6 @@ type Identity struct {
type Metadata struct {
SeqNumber string `json:"seq_number"`
Attnets string `json:"attnets"`
Syncnets string `json:"syncnets,omitempty"`
Cgc string `json:"custody_group_count,omitempty"`
}
type GetPeerResponse struct {

View File

@@ -262,3 +262,15 @@ type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
type SignedInclusionList struct {
Message *InclusionList `json:"message"`
Signature string `json:"signature"`
}
type InclusionList struct {
Slot string `json:"slot"`
ValidatorIndex string `json:"validator_index"`
InclusionListCommitteeRoot string `json:"inclusion_list_committee_root"`
Transactions []string `json:"transactions"`
}
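These structs pair with the ToConsensus conversions added earlier in this diff. A hedged round-trip sketch, with zero-filled placeholders sized to the validated lengths (32-byte committee root, 96-byte BLS signature):

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/OffchainLabs/prysm/v6/api/server/structs"
)

func main() {
	root := "0x" + strings.Repeat("00", 32)
	sig := "0x" + strings.Repeat("00", 96)
	raw := fmt.Sprintf(`{
	  "message": {
	    "slot": "1024",
	    "validator_index": "7",
	    "inclusion_list_committee_root": "%s",
	    "transactions": ["0x01", "0x02"]
	  },
	  "signature": "%s"
	}`, root, sig)

	var il structs.SignedInclusionList
	if err := json.Unmarshal([]byte(raw), &il); err != nil {
		panic(err)
	}
	signed, err := il.ToConsensus() // the length checks happen here
	if err != nil {
		panic(err)
	}
	fmt.Println(signed.Message.Slot, len(signed.Message.Transactions)) // 1024 2
}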

View File

@@ -12,6 +12,7 @@ go_library(
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"inclusion_list.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
@@ -27,7 +28,7 @@ go_library(
"receive_block.go",
"receive_data_column.go",
"service.go",
"setup_forkchoice.go",
"setup_forchoice.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
],
@@ -50,6 +51,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
@@ -62,7 +64,6 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
@@ -73,7 +74,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -148,6 +148,7 @@ go_test(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -160,7 +161,6 @@ go_test(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
@@ -182,7 +182,6 @@ go_test(
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -196,7 +195,6 @@ go_test(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -42,6 +42,7 @@ type ForkchoiceFetcher interface {
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(time.Time)
GetAttesterHead() [32]byte
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)

View File

@@ -27,6 +27,13 @@ func (s *Service) GetProposerHead() [32]byte {
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// GetAttesterHead returns the corresponding value from forkchoice
func (s *Service) GetAttesterHead() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.GetAttesterHead()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
s.cfg.ForkChoiceStore.Lock()

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -625,7 +624,6 @@ func Test_hashForGenesisRoot(t *testing.T) {
ctx := t.Context()
c := setupBeaconChain(t, beaconDB)
st, _ := util.DeterministicGenesisStateElectra(t, 10)
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
root, err := beaconDB.GenesisBlockRoot(ctx)
require.NoError(t, err)

View File

@@ -7,15 +7,10 @@ type currentlySyncingBlock struct {
roots map[[32]byte]struct{}
}
func (b *currentlySyncingBlock) set(root [32]byte) error {
func (b *currentlySyncingBlock) set(root [32]byte) {
b.Lock()
defer b.Unlock()
_, ok := b.roots[root]
if ok {
return errBlockBeingSynced
}
b.roots[root] = struct{}{}
return nil
}
func (b *currentlySyncingBlock) unset(root [32]byte) {

View File

@@ -44,8 +44,6 @@ var (
errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
// errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
// errBlockBeingSynced is returned when a block is being synced.
errBlockBeingSynced = errors.New("block is being synced")
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -16,7 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -174,7 +174,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
@@ -218,18 +217,24 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
// stVersion should represent the version of the pre-state; header should also be from the pre-state.
func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if stVersion < version.Bellatrix {
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
@@ -263,31 +268,40 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
var txs [][]byte
// Post-FOCIL, only consider the inclusion list constraint if it matches the current slot.
if slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().Eip7805ForkEpoch && s.CurrentSlot() == blk.Block().Slot() {
txs = s.inclusionListCache.Get(blk.Block().Slot() - 1)
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests, txs)
switch {
case err == nil:
newPayloadValidNodeCount.Inc()
return true, nil
}
logFields := logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"root": fmt.Sprintf("%#x", blk.Root()),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
case errors.Is(err, execution.ErrBadInclusionListPayloadStatus):
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
}).Info("Called new payload but inclusion list didn't satisfy")
blk.Block().MarkInclusionListNotSatisfied() // Cache the block root that fails to satisfy the inclusion list constraint.
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logFields).Info("Called new payload with optimistic block")
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
log.WithFields(logFields).WithError(err).Error("Invalid payload status")
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: bytesutil.ToBytes32(lastValidHash),
lastValidHash: lvh,
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer

View File

@@ -19,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -310,7 +309,6 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
block: wba,
}
genesis.StoreStateDuringTest(t, st)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &fcuConfig{
@@ -405,7 +403,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
@@ -481,12 +478,33 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
a := util.NewBeaconBlockAltair()
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
require.NoError(t, err)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.Body.ExecutionPayload.BlockNumber = 1
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
@@ -523,6 +541,12 @@ func Test_NotifyNewPayload(t *testing.T) {
blk: altairBlk,
isValidPayload: true,
},
{
name: "nil beacon block",
postState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
@@ -549,8 +573,15 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "altair pre state, happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -561,7 +592,24 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "not at merge transition",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -572,8 +620,15 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -584,8 +639,15 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "undefined error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -597,8 +659,15 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b
@@ -629,9 +698,7 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(tt.blk)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.invalidBlock {
@@ -655,12 +722,17 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
ctx := tr.ctx
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(bellatrixBlk)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -677,7 +749,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
service.cfg.ExecutionEngineCaller = e
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
require.NoError(t, err)
require.Equal(t, true, validated)
}

View File

@@ -102,6 +102,8 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
log.WithError(err).Error("Could not save head")
}
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil

View File

@@ -0,0 +1,72 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/sirupsen/logrus"
)
const updateInclusionListBlockInterval = time.Second
// Routine that updates block building with inclusion lists one second before the next slot starts.
func (s *Service) updateBlockWithInclusionListRoutine() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Failed to wait for initial sync")
return
}
interval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - updateInclusionListBlockInterval
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{interval})
for {
select {
case <-s.ctx.Done():
return
case <-ticker.C():
s.updateBlockWithInclusionList(context.Background())
}
}
}
// Updates block building with inclusion lists using the current payload ID, then caches the new payload ID.
func (s *Service) updateBlockWithInclusionList(ctx context.Context) {
currentSlot := s.CurrentSlot()
// Skip update if not in or past the FOCIL fork epoch.
if slots.ToEpoch(currentSlot) < params.BeaconConfig().Eip7805ForkEpoch {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
headRoot := s.headRoot()
id, found := s.cfg.PayloadIDCache.PayloadID(currentSlot+1, headRoot)
if !found {
return
}
txs := s.inclusionListCache.Get(currentSlot)
if len(txs) == 0 {
log.WithField("slot", currentSlot).Warn("No inclusion list transactions found to update block")
return
}
newID, err := s.cfg.ExecutionEngineCaller.UpdatePayloadWithInclusionList(ctx, id, txs)
if err != nil {
log.WithError(err).Error("Failed to update block with inclusion list")
return
}
log.WithFields(logrus.Fields{
"slot": currentSlot,
"headRoot": fmt.Sprintf("%x", headRoot),
"txs": len(txs),
}).Info("Updated block with inclusion list")
s.cfg.PayloadIDCache.Set(currentSlot+1, headRoot, *newID)
}
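
A note on the timing above: the ticker fires at `SecondsPerSlot - updateInclusionListBlockInterval` seconds into each slot, i.e. one second before the next slot begins. A minimal standalone sketch of that arithmetic, assuming mainnet's 12-second slots (the constant name mirrors the one defined in this file):

package main

import (
	"fmt"
	"time"
)

const updateInclusionListBlockInterval = time.Second

func main() {
	secondsPerSlot := uint64(12) // assumed mainnet SECONDS_PER_SLOT
	// Offset into the slot at which the ticker fires.
	interval := time.Second*time.Duration(secondsPerSlot) - updateInclusionListBlockInterval
	fmt.Println(interval) // 11s, i.e. one second before the next slot starts
}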

View File

@@ -14,10 +14,7 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
type Blob [BytesPerBlob]byte
// BytesPerCell is the number of bytes in a single cell.
const (
BytesPerCell = ckzg4844.BytesPerCell
BytesPerProof = ckzg4844.BytesPerProof
)
const BytesPerCell = ckzg4844.BytesPerCell
// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
@@ -26,7 +23,7 @@ type Cell [BytesPerCell]byte
type Commitment [48]byte
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [BytesPerProof]byte
type Proof [48]byte
// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
@@ -105,6 +102,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
for i := range cells {
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

View File

@@ -11,7 +11,7 @@ import (
)
var (
// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
//go:embed trusted_setup_4096.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context

View File

@@ -1,14 +1,32 @@
package kzg
import (
"fmt"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/pkg/errors"
)
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
return nil
}
if len(sidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(sidecars[0].Blob),
bytesToCommitment(sidecars[0].KzgCommitment),
bytesToKZGProof(sidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(sidecars))
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
func bytesToBlob(blob []byte) *GoKZG.Blob {
var ret GoKZG.Blob
copy(ret[:], blob)
@@ -24,144 +42,3 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
copy(ret[:], proof)
return
}
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
return nil
}
if len(blobSidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(blobSidecars[0].Blob),
bytesToCommitment(blobSidecars[0].KzgCommitment),
bytesToKZGProof(blobSidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(blobSidecars))
cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
proofs := make([]GoKZG.KZGProof, len(blobSidecars))
for i, sidecar := range blobSidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
}
if len(blobs) == 0 {
return nil
}
// Optimize for single blob case - use single verification to avoid batch overhead
if len(blobs) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(blobs[0]),
bytesToCommitment(commitments[0]),
bytesToKZGProof(proofs[0]))
}
// Use batch verification for multiple blobs
ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
for i := range blobs {
if len(blobs[i]) != len(ckzg4844.Blob{}) {
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
}
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
}
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
}
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
}
valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
if err != nil {
return errors.Wrap(err, "batch verification")
}
if !valid {
return errors.New("batch KZG proof verification failed")
}
return nil
}
// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
blobCount := uint64(len(blobs))
expectedCellProofs := blobCount * numberOfColumns
if uint64(len(cellProofs)) != expectedCellProofs {
return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
}
if len(commitments) != len(blobs) {
return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
}
if blobCount == 0 {
return nil
}
// Handle multiple blobs - compute cells for all blobs
allCells := make([]Cell, 0, expectedCellProofs)
allCommitments := make([]Bytes48, 0, expectedCellProofs)
allIndices := make([]uint64, 0, expectedCellProofs)
allProofs := make([]Bytes48, 0, expectedCellProofs)
for blobIndex := range blobs {
if len(blobs[blobIndex]) != len(Blob{}) {
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
}
// Convert blob to kzg.Blob type
blob := Blob(blobs[blobIndex])
// Compute cells for this blob
cells, err := ComputeCells(&blob)
if err != nil {
return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
}
// Add cells and corresponding data for each column
for columnIndex := range numberOfColumns {
cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
if len(commitments[blobIndex]) != len(Bytes48{}) {
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
}
if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
}
allCells = append(allCells, cells[columnIndex])
allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
allIndices = append(allIndices, columnIndex)
allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
}
}
// Batch verify all cells
valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
if err != nil {
return errors.Wrap(err, "cell batch verification")
}
if !valid {
return errors.New("cell KZG proof batch verification failed")
}
return nil
}
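
As context for the flattened layout consumed by VerifyCellKZGProofBatchFromBlobData above: the execution client hands over cell proofs as one flat list, and the proof for blob b, column c sits at index b*numberOfColumns + c. A small sketch of that indexing (toy values, not the Prysm API):

package main

import "fmt"

func main() {
	const numberOfColumns = uint64(128) // NUMBER_OF_COLUMNS in the Fulu preset
	for blobIndex := uint64(0); blobIndex < 2; blobIndex++ {
		for _, columnIndex := range []uint64{0, 1, 127} {
			cellProofIndex := blobIndex*numberOfColumns + columnIndex
			fmt.Printf("blob %d, column %3d -> flat proof index %3d\n",
				blobIndex, columnIndex, cellProofIndex)
		}
	}
}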

View File

@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}
func TestVerify(t *testing.T) {
blobSidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(blobSidecars...))
sidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(sidecars...))
}
func TestBytesToAny(t *testing.T) {
@@ -37,7 +37,6 @@ func TestBytesToAny(t *testing.T) {
}
func TestGenerateCommitmentAndProof(t *testing.T) {
require.NoError(t, Start())
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
@@ -46,432 +45,3 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
require.Equal(t, expectedCommitment, commitment)
require.Equal(t, expectedProof, proof)
}
func TestVerifyBlobKZGProofBatch(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob batch", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("valid multiple blob batch", func(t *testing.T) {
blobCount := 3
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
proofs[i] = proof[:]
}
err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
require.NoError(t, err)
})
t.Run("mismatched input lengths", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Test different mismatch scenarios
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:]},
[][]byte{},
[][]byte{proof[:]},
)
require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:], blob[:]},
[][]byte{commitment[:]},
[][]byte{proof[:], proof[:]},
)
require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
})
t.Run("invalid commitment should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
_, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use a different blob's commitment (mismatch)
differentBlob := random.GetRandBlob(456)
wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
// Single blob optimization uses different error message
require.ErrorContains(t, "can't verify opening proof", err)
})
t.Run("invalid proof should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, _, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use wrong proof
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "short buffer", err)
})
t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
// First blob - valid
blob1 := random.GetRandBlob(123)
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Second blob - invalid proof
blob2 := random.GetRandBlob(456)
commitment2, _, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment2[:]}
proofs := [][]byte{proof1[:], invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch verification", err)
})
t.Run("batch KZG proof verification failed", func(t *testing.T) {
// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
blob1 := random.GetRandBlob(123)
blob2 := random.GetRandBlob(456)
// Generate valid proof for blob1
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Generate valid proof for blob2 but use wrong commitment (from blob1)
_, proof2, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
proofs := [][]byte{proof1[:], proof2[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch KZG proof verification failed", err)
})
}
func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and commitment
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Create flattened cell proofs (like execution client format)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("valid multiple blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := range blobCount {
// Generate blob and commitment
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs for this blob
for j := range numberOfColumns {
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
require.NoError(t, err)
})
t.Run("mismatched blob and commitment count", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
err := VerifyCellKZGProofBatchFromBlobData(
[][]byte{blob[:]},
[][]byte{}, // Empty commitments
[][]byte{},
128,
)
require.ErrorContains(t, "expected 128 cell proofs", err)
})
t.Run("wrong cell proof count", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
wrongCellProofs := make([][]byte, 10)
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
})
t.Run("invalid cell proofs should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Create invalid cell proofs (all zeros)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
invalidCellProofs[i] = make([]byte, 48) // All zeros
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "cell batch verification", err)
})
t.Run("mismatched commitment should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and correct cell proofs
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Generate wrong commitment from different blob
randBlob2 := random.GetRandBlob(456)
var differentBlob Blob
copy(differentBlob[:], randBlob2[:])
wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
require.NoError(t, err)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "cell KZG proof batch verification failed", err)
})
t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Create invalid blob (not properly formatted)
invalidBlobData := make([]byte, 10) // Too short
commitment := make([]byte, 48) // Dummy commitment
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{invalidBlobData}
commitments := [][]byte{commitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NotNil(t, err)
require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
})
t.Run("invalid commitment size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
// Create invalid commitment (wrong size)
invalidCommitment := make([]byte, 32) // Should be 48 bytes
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{invalidCommitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("invalid cell proof size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Create invalid cell proofs (wrong size)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
if i == 0 {
invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
} else {
invalidCellProofs[i] = make([]byte, 48)
}
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
// First blob - valid
randBlob1 := random.GetRandBlob(123)
var blob1 Blob
copy(blob1[:], randBlob1[:])
commitment1, err := BlobToKZGCommitment(&blob1)
require.NoError(t, err)
blobs[0] = blob1[:]
commitments[0] = commitment1[:]
// Second blob - use invalid commitment size
randBlob2 := random.GetRandBlob(456)
var blob2 Blob
copy(blob2[:], randBlob2[:])
blobs[1] = blob2[:]
commitments[1] = make([]byte, 32) // Wrong size
// Add cell proofs for both blobs
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := 0; i < blobCount; i++ {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs - make some invalid in the second blob
for j := uint64(0); j < numberOfColumns; j++ {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))
} else {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
})
}

View File

@@ -6,11 +6,12 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -35,7 +36,7 @@ func WithMaxGoroutines(x int) Option {
// WithLCStore for light client store access.
func WithLCStore() Option {
return func(s *Service) error {
s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
return nil
}
}
@@ -234,6 +235,14 @@ func WithSyncChecker(checker Checker) Option {
}
}
// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}
// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
@@ -266,3 +275,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
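// WithInclusionListCache sets the inclusion lists cache for the blockchain service.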
func WithInclusionListCache(c *cache.InclusionLists) Option {
return func(s *Service) error {
s.inclusionListCache = c
return nil
}
}
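
For readers unfamiliar with the pattern used throughout this file: each With* helper returns a closure that mutates the service during construction. A minimal self-contained sketch of the idea, with hypothetical stand-in types rather than the real Service:

package main

import "fmt"

type inclusionLists struct{}

type service struct{ ilCache *inclusionLists }

// option mirrors the Option type above: a constructor-time mutation that may fail.
type option func(*service) error

func withInclusionListCache(c *inclusionLists) option {
	return func(s *service) error {
		s.ilCache = c
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withInclusionListCache(&inclusionLists{}))
	fmt.Println(s.ilCache != nil, err) // true <nil>
}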

View File

@@ -3,6 +3,7 @@ package blockchain
import (
"context"
"fmt"
"slices"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -158,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}
@@ -239,14 +240,13 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
}
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[i] = args
pendingNodes[len(blks)-i-1] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return err
@@ -283,10 +283,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
// Insert all nodes to forkchoice
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Set their optimistic status
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -304,30 +308,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
if blockVersion >= version.Fulu {
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
}
return nil
}
if blockVersion >= version.Deneb {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
}
return nil
}
return nil
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -604,7 +584,7 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(expected) == 0 {
return nil, nil
@@ -612,7 +592,7 @@ func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLen
if len(expected) > maxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices := store.Summary(root)
indices := bs.Summary(root)
missing := make(map[uint64]bool, len(expected))
for i := range expected {
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -627,7 +607,7 @@ func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLen
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
@@ -639,7 +619,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
}
// Get a summary of the data columns stored in the database.
summary := store.Summary(root)
summary := bs.Summary(root)
// Check all expected data columns against the summary.
missing := make(map[uint64]bool)
@@ -659,14 +639,14 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(
ctx context.Context,
roBlock consensusblocks.ROBlock,
root [fieldparams.RootLength]byte,
signedBlock interfaces.ReadOnlySignedBeaconBlock,
) error {
block := roBlock.Block()
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
@@ -686,9 +666,10 @@ func (s *Service) areDataColumnsAvailable(
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
@@ -709,21 +690,16 @@ func (s *Service) areDataColumnsAvailable(
}
// All columns to sample need to be available for the block to be considered available.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()
// Prevent the custody group count from changing during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()
// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}
// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}
@@ -736,7 +712,7 @@ func (s *Service) areDataColumnsAvailable(
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
@@ -745,14 +721,14 @@ func (s *Service) areDataColumnsAvailable(
}
// Get a map of data column indices that are not currently available.
missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}
// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missing) == 0 {
if len(missingMap) == 0 {
return nil
}
@@ -769,17 +745,33 @@ func (s *Service) areDataColumnsAvailable(
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
missingCount := uint64(len(missing))
missingMapCount := uint64(len(missingMap))
if missingCount == 0 {
if missingMapCount == 0 {
return
}
var (
expected interface{} = "all"
missing interface{} = "all"
)
numberOfColumns := params.BeaconConfig().NumberOfColumns
colMapCount := uint64(len(peerInfo.CustodyColumns))
if colMapCount < numberOfColumns {
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
}
if missingMapCount < numberOfColumns {
missing = uint64MapToSortedSlice(missingMap)
}
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
"columnsExpected": expected,
"columnsWaiting": missing,
}).Warning("Data columns still missing at slot end")
})
defer timer.Stop()
@@ -795,7 +787,7 @@ func (s *Service) areDataColumnsAvailable(
for _, index := range idents.Indices {
// This is a data column we are expecting.
if _, ok := missing[index]; ok {
if _, ok := missingMap[index]; ok {
storedDataColumnsCount++
}
@@ -806,10 +798,10 @@ func (s *Service) areDataColumnsAvailable(
}
// Remove the index from the missing map.
delete(missing, index)
delete(missingMap, index)
// Return if there is no more missing data columns.
if len(missing) == 0 {
if len(missingMap) == 0 {
return nil
}
}
@@ -817,13 +809,13 @@ func (s *Service) areDataColumnsAvailable(
case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missing))
missingIndicesCount := uint64(len(missingMap))
if missingIndicesCount < numberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
missingIndices = uint64MapToSortedSlice(missingMap)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
}
}
}
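
For context on the sampling size being replaced above: the spec's custody sampling takes the larger of SAMPLES_PER_SLOT and the node's custody group count. A toy illustration of that max() rule (SAMPLES_PER_SLOT = 8 is the assumed mainnet value):

package main

import "fmt"

func main() {
	samplesPerSlot := uint64(8) // assumed mainnet SAMPLES_PER_SLOT
	for _, custodyGroupCount := range []uint64{4, 8, 64, 128} {
		samplingSize := max(samplesPerSlot, custodyGroupCount)
		fmt.Printf("custody groups %3d -> sampling size %3d\n",
			custodyGroupCount, samplingSize)
	}
}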
@@ -904,6 +896,16 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
for idx := range input {
output = append(output, idx)
}
slices.Sort(output)
return output
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot

View File

@@ -1,14 +1,17 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -28,10 +31,6 @@ import (
"github.com/sirupsen/logrus"
)
// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(s.genesisTime)
@@ -131,26 +130,35 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
})
}
// processLightClientUpdates saves the light client data in lcStore when the feature flag is enabled.
func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
if err := s.processLightClientUpdate(cfg); err != nil {
log.WithError(err).Error("Failed to process light client update")
}
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to process light client optimistic update")
}
if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to process light client finality update")
}
}
// processLightClientUpdate saves the light client update for this block
// if it's better than the one already saved, when the feature flag is enabled.
func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
attestedRoot := cfg.roblock.Block().ParentRoot()
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
return
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
if attestedBlock == nil || attestedBlock.IsNil() {
log.Error("processLightClientUpdates: Could not get attested block")
return
return errors.New("attested block is nil")
}
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
return
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
if attestedState == nil || attestedState.IsNil() {
log.Error("processLightClientUpdates: Could not get attested state")
return
return errors.New("attested state is nil")
}
finalizedRoot := attestedState.FinalizedCheckpoint().Root
@@ -158,17 +166,161 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
return
return nil
}
log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
return
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
update, err := lightclient.NewLightClientUpdateFromBeaconState(
cfg.ctx,
s.CurrentSlot(),
cfg.postState,
cfg.roblock,
attestedState,
attestedBlock,
finalizedBlock,
)
if err != nil {
log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
return errors.Wrapf(err, "could not create light client update")
}
log.Debug("Processed light client updates")
period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
}
func (s *Service) processLightClientFinalityUpdate(
ctx context.Context,
signed interfaces.ReadOnlySignedBeaconBlock,
postState state.BeaconState,
) error {
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
finalizedCheckpoint := attestedState.FinalizedCheckpoint()
// Check if the finalized checkpoint has changed
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
return nil
}
finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
return nil
}
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
finalizedBlock,
)
if err != nil {
return errors.Wrap(err, "could not create light client finality update")
}
lastUpdate := s.lcStore.LastFinalityUpdate()
if lastUpdate != nil {
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority.
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())
lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())
if newUpdateSlot < lastUpdateSlot {
log.Debug("Skip saving light client finality newUpdate: Older than local newUpdate")
return nil
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
log.Debug("Skip saving light client finality update: No supermajority advantage")
return nil
}
}
log.Debug("Saving new light client finality update")
s.lcStore.SetLastFinalityUpdate(newUpdate)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: newUpdate,
})
if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client finality update")
}
return nil
}
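
The forwarding rule implemented above can be distilled into a single predicate: forward when the new finalized slot is strictly newer, or when it ties the last forwarded slot while newly reaching supermajority (> 2/3 of the 512-member mainnet sync committee, i.e. at least 342 participating bits). A sketch with plain stand-in types, not the real update interfaces:

package main

import "fmt"

// shouldForward reports whether a new finality update should replace the last
// forwarded one: strictly newer finalized slot, or the same slot upgrading
// participation from below supermajority to supermajority.
func shouldForward(newSlot, lastSlot uint64, newSuper, lastSuper bool) bool {
	if newSlot > lastSlot {
		return true
	}
	return newSlot == lastSlot && newSuper && !lastSuper
}

func main() {
	fmt.Println(shouldForward(101, 100, false, false)) // true: newer slot
	fmt.Println(shouldForward(100, 100, true, false))  // true: gained supermajority
	fmt.Println(shouldForward(100, 100, false, true))  // false: no advantage
}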
func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
postState state.BeaconState) error {
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
)
if err != nil {
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
log.WithError(err).Debug("Skipping processing light client optimistic update")
return nil
}
return errors.Wrap(err, "could not create light client optimistic update")
}
lastUpdate := s.lcStore.LastOptimisticUpdate()
if lastUpdate != nil {
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
log.Debug("Skip saving light client optimistic update: Older than local update")
return nil
}
}
log.Debug("Saving new light client optimistic update")
s.lcStore.SetLastOptimisticUpdate(newUpdate)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: newUpdate,
})
if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client optimistic update")
}
return nil
}
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
@@ -366,9 +518,6 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if fCheckpoint.Epoch > jCheckpoint.Epoch {
return ErrInvalidCheckpointArgs
}
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
// Fork choice only matters from last finalized slot.
@@ -377,8 +526,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
if err != nil {
return err
}
root := signed.Block().ParentRoot()
// The first block can have a bogus root since the block is not inserted in forkchoice
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
root := roblock.Block().ParentRoot()
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
@@ -397,13 +553,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
if len(pendingNodes) == 0 {
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
return ErrNotDescendantOfFinalized
}
slices.Reverse(pendingNodes)
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -23,7 +24,6 @@ import (
mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -35,7 +35,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -375,81 +374,6 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
tests := []struct {
name string
finalizedEpoch primitives.Epoch
justifiedEpoch primitives.Epoch
expectedError error
}{
{
name: "finalized epoch greater than justified epoch",
finalizedEpoch: 5,
justifiedEpoch: 3,
expectedError: ErrInvalidCheckpointArgs,
},
{
name: "valid case - finalized equal to justified",
finalizedEpoch: 3,
justifiedEpoch: 3,
expectedError: nil,
},
{
name: "valid case - finalized less than justified",
finalizedEpoch: 2,
justifiedEpoch: 3,
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
// Create a simple block for testing
blk := util.NewBeaconBlock()
blk.Block.Slot = 10
blk.Block.ParentRoot = service.originBlockRoot[:]
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, blk)
// Create checkpoints with test case epochs
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.finalizedEpoch,
Root: service.originBlockRoot[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.justifiedEpoch,
Root: service.originBlockRoot[:],
}
// Set up forkchoice store to avoid other errors
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
err = service.fillInForkChoiceMissingBlocks(
t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
if tt.expectedError != nil {
require.ErrorIs(t, err, tt.expectedError)
} else {
// For valid cases, we might get other errors (like block not being descendant of finalized)
// but we shouldn't get the checkpoint validation error
if err != nil && errors.Is(err, tt.expectedError) {
t.Errorf("Unexpected checkpoint validation error: %v", err)
}
}
})
}
}
// blockTree1 constructs the following tree:
//
// /- B1
@@ -2056,15 +1980,14 @@ func TestNoViableHead_Reboot(t *testing.T) {
genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
gb := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
genesisRoot, err := gb.Block.HashTreeRoot()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
require.NoError(t, service.saveGenesisData(ctx, genesisState))
genesis.StoreStateDuringTest(t, genesisState)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
@@ -2207,13 +2130,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Forkchoice has the genesisRoot loaded at startup
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
// Service's store has the finalized state as headRoot
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
require.Equal(t, false, optimistic)
// Check that the node's justified checkpoint does not agree with the
// last valid state's justified checkpoint
@@ -2795,11 +2718,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
s, tr := minimalTestService(t, WithLCStore())
ctx := tr.ctx
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
@@ -2822,8 +2740,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
@@ -2838,9 +2754,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
t.Run("no old update", func(t *testing.T) {
s.processLightClientUpdates(cfg)
require.NoError(t, s.processLightClientUpdate(cfg))
// Check that the light client update is saved
u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2854,12 +2771,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.processLightClientUpdates(cfg)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2883,12 +2800,12 @@ func TestProcessLightClientUpdate(t *testing.T) {
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.processLightClientUpdates(cfg)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
@@ -2958,52 +2875,46 @@ func TestIsDataAvailable(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - no missing data columns", func(t *testing.T) {
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
@@ -3011,7 +2922,7 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
testParams := testIsAvailableParams{
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
blobKzgCommitmentsCount: 3,
@@ -3048,12 +2959,7 @@ func TestIsDataAvailable(t *testing.T) {
require.NoError(t, err)
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err = service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
@@ -3065,7 +2971,11 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
var custodyInfo peerdas.CustodyInfo
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
for i := range minimumColumnsCountToReconstruct - missingColumns {
@@ -3073,14 +2983,12 @@ func TestIsDataAvailable(t *testing.T) {
}
testParams := testIsAvailableParams{
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
_, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
require.NoError(t, err)
block := signed.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
@@ -3112,12 +3020,7 @@ func TestIsDataAvailable(t *testing.T) {
require.NoError(t, err)
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err = service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
@@ -3125,7 +3028,7 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
params := testIsAvailableParams{
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
blobKzgCommitmentsCount: 3,
}
@@ -3136,9 +3039,7 @@ func TestIsDataAvailable(t *testing.T) {
cancel()
}()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
err := service.isDataAvailable(ctx, root, signed)
require.NotNil(t, err)
})
}
@@ -3210,11 +3111,6 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
s.cfg.P2P = &mockp2p.FakeP2P{}
ctx := tr.ctx
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
testCases := []struct {
name string
oldOptions []util.LightClientOption
@@ -3230,7 +3126,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
{
name: "Same age",
oldOptions: []util.LightClientOption{},
newOptions: []util.LightClientOption{util.WithSupermajority(0)}, // supermajority does not matter here and is only added to result in two different updates
newOptions: []util.LightClientOption{util.WithSupermajority()}, // supermajority does not matter here and is only added to result in two different updates
expectReplace: false,
},
{
@@ -3274,16 +3170,23 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
s.lcStore = &lightClient.Store{}
var oldActualUpdate interfaces.LightClientOptimisticUpdate
var err error
if tc.oldOptions != nil {
// config for old update
lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
s.processLightClientUpdates(cfgOld)
require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(
lOld.Ctx,
lOld.State.Slot(),
lOld.State,
lOld.Block,
lOld.AttestedState,
lOld.AttestedBlock,
)
require.NoError(t, err)
// check that the old update is saved
@@ -3295,9 +3198,16 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
// config for new update
lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
s.processLightClientUpdates(cfgNew)
require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(
lNew.Ctx,
lNew.State.Slot(),
lNew.State,
lNew.Block,
lNew.AttestedState,
lNew.AttestedBlock,
)
require.NoError(t, err)
require.DeepNotEqual(t, newActualUpdate, oldActualUpdate, "new update should not be equal to old update")
@@ -3336,7 +3246,6 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
s, tr := minimalTestService(t)
s.cfg.P2P = &mockp2p.FakeP2P{}
ctx := tr.ctx
s.head = &head{}
testCases := []struct {
name string
@@ -3351,39 +3260,39 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
expectReplace: true,
},
{
name: "Old update is better - finalized slot is higher",
name: "Old update is better - age - no supermajority",
oldOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
newOptions: []util.LightClientOption{},
expectReplace: false,
},
{
name: "Old update is better - attested slot is higher",
oldOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
name: "Old update is better - age - both supermajority",
oldOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
newOptions: []util.LightClientOption{util.WithSupermajority()},
expectReplace: false,
},
{
name: "Old update is better - supermajority",
oldOptions: []util.LightClientOption{util.WithSupermajority()},
newOptions: []util.LightClientOption{},
expectReplace: false,
},
{
name: "Old update is better - signature slot is higher",
oldOptions: []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
newOptions: []util.LightClientOption{},
expectReplace: false,
},
{
name: "New update is better - finalized slot is higher",
oldOptions: []util.LightClientOption{},
newOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
name: "New update is better - age - both supermajority",
oldOptions: []util.LightClientOption{util.WithSupermajority()},
newOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
expectReplace: true,
},
{
name: "New update is better - attested slot is higher",
name: "New update is better - age - no supermajority",
oldOptions: []util.LightClientOption{},
newOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
newOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
expectReplace: true,
},
{
name: "New update is better - signature slot is higher",
name: "New update is better - supermajority",
oldOptions: []util.LightClientOption{},
newOptions: []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
newOptions: []util.LightClientOption{util.WithSupermajority()},
expectReplace: true,
},
}
@@ -3415,21 +3324,26 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
s.lcStore = &lightClient.Store{}
var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
var err error
if tc.oldOptions != nil {
// config for old update
lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
blkRoot, err := lOld.Block.Block().HashTreeRoot()
require.NoError(t, err)
s.head.block = lOld.Block
s.head.root = blkRoot
s.processLightClientUpdates(cfgOld)
require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
// check that the old update is saved
actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
cfgOld.postState.Slot(),
cfgOld.postState,
cfgOld.roblock,
lOld.AttestedState,
lOld.AttestedBlock,
lOld.FinalizedBlock,
)
require.NoError(t, err)
oldUpdate := s.lcStore.LastFinalityUpdate()
require.DeepEqual(t, actualOldUpdate, oldUpdate)
@@ -3437,14 +3351,18 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
// config for new update
lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
blkRoot, err := lNew.Block.Block().HashTreeRoot()
require.NoError(t, err)
s.head.block = lNew.Block
s.head.root = blkRoot
s.processLightClientUpdates(cfgNew)
require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
// check that the actual old update and the actual new update are different
actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)
actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
cfgNew.postState.Slot(),
cfgNew.postState,
cfgNew.roblock,
lNew.AttestedState,
lNew.AttestedBlock,
lNew.FinalizedBlock,
)
require.NoError(t, err)
require.DeepNotEqual(t, actualOldUpdate, actualNewUpdate)

View File

@@ -108,6 +108,14 @@ func (s *Service) spawnProcessAttestationsRoutine() {
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
// Prune inclusion lists that are more than 2 slots old.
// Meaning that at second 0 of slot 100, we prune the inclusion lists of slot 98.
cachedSlot := primitives.Slot(0)
if slotInterval.Slot > 2 {
cachedSlot = slotInterval.Slot - 2
}
s.inclusionListCache.Delete(cachedSlot)
}
}
}
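For illustration, a minimal sketch of the two-slot pruning window applied above; the helper name is hypothetical, and the primitives import path is taken from the surrounding code.
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// pruneTarget returns the slot whose inclusion lists are dropped at the
// start of the current slot: two slots back, guarding against underflow
// during the first two slots after genesis.
func pruneTarget(current primitives.Slot) primitives.Slot {
	if current > 2 {
		return current - 2
	}
	return 0
}

func main() {
	fmt.Println(pruneTarget(100)) // 98
	fmt.Println(pruneTarget(1))   // 0
}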
@@ -177,7 +185,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
continue

View File

@@ -16,6 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -83,16 +84,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
receivedTime := time.Now()
err := s.blockBeingSynced.set(blockRoot)
if errors.Is(err, errBlockBeingSynced) {
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
return nil
}
s.blockBeingSynced.set(blockRoot)
defer s.blockBeingSynced.unset(blockRoot)
blockCopy, err := block.Copy()
if err != nil {
return errors.Wrap(err, "block copy")
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
@@ -103,17 +100,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
currentCheckpoints := s.saveCurrentCheckpoints(preState)
roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "new ro block with root")
return err
}
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return errors.Wrap(err, "validator execution and consensus")
return err
}
daWaitedTime, err := s.handleDA(ctx, avs, roblock)
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return errors.Wrap(err, "handle da")
return err
}
// Defragment the state before continuing block processing.
@@ -134,10 +131,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.postBlockProcess(args); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return errors.Wrap(err, "post block process")
return err
}
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
return errors.Wrap(err, "update checkpoints")
return err
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if s.slasherEnabled {
@@ -151,12 +148,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return errors.Wrap(err, "check save hot state db")
return err
}
// We apply the same heuristic to some of our more important caches.
if err := s.handleCaches(); err != nil {
return errors.Wrap(err, "handle caches")
return err
}
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
return nil
@@ -239,19 +236,37 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
} else {
err = s.isDataAvailable(ctx, block)
func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [fieldparams.RootLength]byte,
avs das.AvailabilityStore,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed = time.Since(start)
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}
}(time.Now())
if avs == nil {
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
return
}
return
}
elapsed := time.Since(start)
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
var rob blocks.ROBlock
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return
}
return elapsed, err
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
return
}
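As an aside, a self-contained sketch of the named-return/deferred-timing pattern that handleDA uses above, assuming only the standard library; the print call is a stand-in for the histogram observation.
package main

import (
	"errors"
	"fmt"
	"time"
)

// timedOp mirrors the pattern in handleDA: a deferred closure sets the named
// elapsed return exactly once, and records the metric only when the wrapped
// operation succeeded.
func timedOp(fail bool) (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)
		if err == nil {
			fmt.Printf("observed %v\n", elapsed) // stand-in for dataAvailWaitedTime.Observe
		}
	}(time.Now())

	time.Sleep(10 * time.Millisecond)
	if fail {
		err = errors.New("data not available")
	}
	return
}

func main() {
	elapsed, err := timedOp(false)
	fmt.Println(elapsed > 0, err) // true <nil>
}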
func (s *Service) reportPostBlockProcessing(
@@ -301,28 +316,13 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
if features.Get().EnableLightClient {
// Save a light client bootstrap for the finalized checkpoint
go func() {
st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
return
}
err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not save light client bootstrap by block root")
} else {
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
}
}()
// Clean up the light client store caches
go func() {
err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not migrate light client store to cold storage")
} else {
log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
}
}()
}
}

View File

@@ -9,9 +9,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -192,9 +192,7 @@ func TestHandleDA(t *testing.T) {
require.NoError(t, err)
s, _ := minimalTestService(t)
block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
require.NoError(t, err)
elapsed, err := s.handleDA(t.Context(), nil, block)
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
require.NoError(t, err)
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}
@@ -313,10 +311,7 @@ func TestService_HasBlock(t *testing.T) {
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(t.Context(), r))
err = s.blockBeingSynced.set(r)
require.NoError(t, err)
err = s.blockBeingSynced.set(r)
require.ErrorIs(t, err, errBlockBeingSynced)
s.blockBeingSynced.set(r)
require.Equal(t, false, s.HasBlock(t.Context(), r))
}
@@ -596,7 +591,11 @@ func TestProcessLightClientBootstrap(t *testing.T) {
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
require.NoError(t, err)
require.NotNil(t, sss)
s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)

View File

@@ -15,9 +15,10 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
}
// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecar")
return errors.Wrap(err, "save data column sidecars")
}
return nil

View File

@@ -12,15 +12,17 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -29,7 +31,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -69,6 +70,7 @@ type Service struct {
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
inclusionListCache *cache.InclusionLists
}
// config options for the service.
@@ -96,6 +98,7 @@ type config struct {
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
CustodyInfo *peerdas.CustodyInfo
}
// Checker is an interface used to determine if a node is in initial sync
@@ -206,12 +209,21 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
if err := s.startFromExecutionChain(); err != nil {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
go s.updateBlockWithInclusionListRoutine()
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -257,9 +269,6 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if state.IsNil(saved) {
return errors.New("Last finalized state at startup is nil")
}
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = saved.GenesisTime()
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
@@ -289,20 +298,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
if !params.FuluEnabled() {
return nil
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
return nil
}
@@ -365,6 +360,62 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return nil
}
func (s *Service) startFromExecutionChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured execution chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("Event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
}()
return nil
}
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.WithError(err).Fatal("Could not initialize beacon chain")
}
// We start a counter to genesis, if needed.
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.WithError(err).Fatal("Could not hash tree root genesis state")
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
}
}
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -467,57 +518,6 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
beaconConfig := params.BeaconConfig()
custodyRequirement := beaconConfig.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
}
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = beaconConfig.NumberOfColumns
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
return earliestAvailableSlot, custodyGroupCount, nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -534,19 +534,3 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}
func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()
fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
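For reference, a sketch of why the FarFutureEpoch guard above matters: multiplying FarFutureEpoch (2^64 - 1) by SlotsPerEpoch overflows a uint64, so an unscheduled fork is mapped to the FarFutureSlot sentinel rather than computed. The constants below assume mainnet's 32 slots per epoch.
package main

import (
	"errors"
	"fmt"
	"math"
)

const (
	slotsPerEpoch  = 32
	farFutureEpoch = uint64(math.MaxUint64)
)

// forkSlot mirrors the guard above: an unscheduled fork returns a sentinel,
// and any other epoch is converted with an explicit overflow check.
func forkSlot(epoch uint64) (uint64, error) {
	if epoch == farFutureEpoch {
		return math.MaxUint64, nil // FarFutureSlot sentinel
	}
	if epoch > math.MaxUint64/slotsPerEpoch {
		return 0, errors.New("slot overflow")
	}
	return epoch * slotsPerEpoch, nil
}

func main() {
	s, err := forkSlot(100)
	fmt.Println(s, err) // 3200 <nil>
}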

View File

@@ -31,7 +31,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -52,7 +51,6 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -73,22 +71,20 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Save a state in stategen for purposes of testing a service stop / shutdown.
@@ -400,6 +396,24 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(t.Context(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
}
func BenchmarkHasBlockDB(b *testing.B) {
ctx := b.Context()
s := testServiceWithDB(b)

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"slices"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -21,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
return errors.Wrap(err, "could not set up forkchoice checkpoints")
}
if err := s.setupForkchoiceTree(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice tree")
return errors.Wrap(err, "could not set up forkchoice root")
}
if err := s.initializeHead(s.ctx, st); err != nil {
return errors.Wrap(err, "could not initialize head from db")
@@ -31,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
func (s *Service) startupHeadRoot() [32]byte {
headStr := features.Get().ForceHead
jp := s.CurrentJustifiedCheckpt()
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
if headStr == "" {
return jRoot
return fRoot
}
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
return jRoot
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
return root
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
return jRoot
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
}
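A minimal sketch of the override precedence implemented above, with the checkpoint fallback passed in by the caller; the helper is hypothetical, and DecodeHexWithLength is approximated with the standard library.
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// startupHead sketches the selection above: an empty override yields the
// checkpoint fallback, "head" asks the DB for the stored head root (falling
// back on error), and anything else is parsed as a 32-byte hex root.
func startupHead(override string, fallback [32]byte, dbHead func() ([32]byte, error)) [32]byte {
	switch {
	case override == "":
		return fallback
	case override == "head":
		root, err := dbHead()
		if err != nil {
			return fallback
		}
		return root
	default:
		b, err := hex.DecodeString(strings.TrimPrefix(override, "0x"))
		if err != nil || len(b) != 32 {
			return fallback
		}
		var root [32]byte
		copy(root[:], b)
		return root
	}
}

func main() {
	fallback := [32]byte{0xaa}
	fmt.Printf("%x\n", startupHead("", fallback, nil))
}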
@@ -113,7 +112,6 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
}
}
slices.Reverse(chain)
return chain, nil
}

View File

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
})
st, _ := util.DeterministicGenesisState(t, 64)
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
require.NotEqual(t, fRoot, root)
require.Equal(t, root, service.startupHeadRoot())
require.NoError(t, service.setupForkchoiceTree(st))
require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
}

View File

@@ -11,26 +11,25 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/libp2p/go-libp2p/core/peer"
"google.golang.org/protobuf/proto"
)
@@ -55,7 +54,6 @@ type mockBroadcaster struct {
type mockAccessor struct {
mockBroadcaster
mockCustodyManager
p2pTesting.MockPeerManager
}
@@ -89,7 +87,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
@@ -99,43 +97,6 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
// mockCustodyManager is a mock implementation of p2p.CustodyManager
type mockCustodyManager struct {
mut sync.RWMutex
earliestAvailableSlot primitives.Slot
custodyGroupCount uint64
}
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.earliestAvailableSlot, nil
}
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.custodyGroupCount, nil
}
func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
dch.mut.Lock()
defer dch.mut.Unlock()
dch.earliestAvailableSlot = earliestAvailableSlot
dch.custodyGroupCount = custodyGroupCount
return earliestAvailableSlot, custodyGroupCount, nil
}
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
return 0
}
var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
type testServiceRequirements struct {
ctx context.Context
db db.Database

View File

@@ -640,6 +640,16 @@ func (s *ChainService) GetProposerHead() [32]byte {
return [32]byte{}
}
// GetAttesterHead mocks the same method in the chain service
func (s *ChainService) GetAttesterHead() [32]byte {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.GetAttesterHead()
}
var rootArr [32]byte
copy(rootArr[:], s.Root)
return rootArr
}
// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
if s.ForkChoiceStore != nil {
@@ -723,8 +733,7 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
}
// ReceiveDataColumns implements the same method in chain service
func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
c.DataColumns = append(c.DataColumns, dcs...)
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
return nil
}

View File

@@ -25,7 +25,6 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -102,22 +101,6 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
return s.c.SubmitBlindedBlock(ctx, b)
}
// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
// After Fulu, relays only return status codes (no payload).
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
defer span.End()
start := time.Now()
defer func() {
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return ErrNoBuilder
}
return s.c.SubmitBlindedBlockPostFulu(ctx, b)
}
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")

View File

@@ -24,22 +24,21 @@ type Config struct {
// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
BlobBundleV2 *v1.BlobsBundleV2
ErrSubmitBlindedBlock error
ErrSubmitBlindedBlockPostFulu error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
BlobBundleV2 *v1.BlobsBundleV2
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
}
// Configured for mocking.
@@ -116,8 +115,3 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator
}
// SubmitBlindedBlockPostFulu for mocking.
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return s.ErrSubmitBlindedBlockPostFulu
}

View File

@@ -15,6 +15,7 @@ go_library(
"common.go",
"doc.go",
"error.go",
"inclusion_list.go",
"interfaces.go",
"payload_id.go",
"proposer_indices.go",
@@ -75,6 +76,7 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"inclusion_list_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",

beacon-chain/cache/inclusion_list.go (new file, 105 lines)
View File

@@ -0,0 +1,105 @@
package cache
import (
"crypto/sha256"
"sync"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)
type InclusionLists struct {
mu sync.RWMutex
ils map[primitives.Slot]map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
isBeforeFreezeDeadline bool
}
}
// NewInclusionLists initializes a new InclusionLists instance.
func NewInclusionLists() *InclusionLists {
return &InclusionLists{
ils: make(map[primitives.Slot]map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
isBeforeFreezeDeadline bool
}),
}
}
// Add adds a set of transactions for a specific slot and validator index.
func (i *InclusionLists) Add(slot primitives.Slot, validatorIndex primitives.ValidatorIndex, txs [][]byte, isBeforeFreezeDeadline bool) {
i.mu.Lock()
defer i.mu.Unlock()
if _, ok := i.ils[slot]; !ok {
i.ils[slot] = make(map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
isBeforeFreezeDeadline bool
})
}
entry := i.ils[slot][validatorIndex]
if entry.seenTwice {
return // No need to modify if already marked as seen twice.
}
if entry.txs == nil {
entry.txs = txs
entry.isBeforeFreezeDeadline = isBeforeFreezeDeadline
} else {
entry.seenTwice = true
entry.txs = nil // Clear transactions to save space if seen twice.
}
i.ils[slot][validatorIndex] = entry
}
// Get retrieves unique transactions for a specific slot.
func (i *InclusionLists) Get(slot primitives.Slot) [][]byte {
i.mu.RLock()
defer i.mu.RUnlock()
ils, exists := i.ils[slot]
if !exists {
return [][]byte{}
}
var uniqueTxs [][]byte
seen := make(map[[32]byte]struct{})
for _, entry := range ils {
if !entry.isBeforeFreezeDeadline {
continue
}
for _, tx := range entry.txs {
hash := sha256.Sum256(tx)
if _, duplicate := seen[hash]; !duplicate {
uniqueTxs = append(uniqueTxs, tx)
seen[hash] = struct{}{}
}
}
}
return uniqueTxs
}
// Delete removes all inclusion lists for a specific slot.
func (i *InclusionLists) Delete(slot primitives.Slot) {
i.mu.Lock()
defer i.mu.Unlock()
delete(i.ils, slot)
}
// SeenTwice checks if a validator's transactions were marked as seen twice for a specific slot.
func (i *InclusionLists) SeenTwice(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
i.mu.RLock()
defer i.mu.RUnlock()
ils, exists := i.ils[slot]
if !exists {
return false
}
entry, exists := ils[idx]
return exists && entry.seenTwice
}
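To make the cache semantics concrete, a small usage sketch; the import path is assumed from the package declaration above, and the untyped constants convert to primitives.Slot and primitives.ValidatorIndex.
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
)

func main() {
	il := cache.NewInclusionLists()

	// The first list from validator 7 for slot 10 is stored as-is.
	il.Add(10, 7, [][]byte{[]byte("tx1"), []byte("tx2")}, true)

	// Validator 8 submits an overlapping list; Get de-duplicates by tx hash.
	il.Add(10, 8, [][]byte{[]byte("tx1"), []byte("tx3")}, true)
	fmt.Println(len(il.Get(10))) // 3: tx1, tx2, tx3

	// A second list from validator 7 marks it as equivocating and drops its
	// transactions from consideration.
	il.Add(10, 7, [][]byte{[]byte("tx4")}, true)
	fmt.Println(il.SeenTwice(10, 7)) // true
	fmt.Println(len(il.Get(10)))     // 2: tx1 and tx3 remain

	// Prune the slot once it falls outside the two-slot window.
	il.Delete(10)
}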

View File

@@ -0,0 +1,81 @@
package cache
import (
"bytes"
"testing"
)
func TestInclusionLists(t *testing.T) {
il := NewInclusionLists()
tests := []struct {
name string
actions func()
expectedGet [][]byte
expectedTwice bool
}{
{
name: "Add single validator with unique transactions",
actions: func() {
il.Add(1, 1, [][]byte{[]byte("tx1"), []byte("tx2")}, true)
},
expectedGet: [][]byte{[]byte("tx1"), []byte("tx2")},
expectedTwice: false,
},
{
name: "Add duplicate transactions for second validator",
actions: func() {
il.Add(1, 2, [][]byte{[]byte("tx1"), []byte("tx3")}, true)
},
expectedGet: [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")},
expectedTwice: false,
},
{
name: "Mark validator as seen twice",
actions: func() {
il.Add(1, 1, [][]byte{[]byte("tx4")}, true)
},
expectedGet: [][]byte{[]byte("tx1"), []byte("tx3")},
expectedTwice: true,
},
{
name: "Delete a slot",
actions: func() {
il.Delete(1)
},
expectedGet: nil,
expectedTwice: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.actions()
// Check Get results
got := il.Get(1)
if !compareTransactions(got, tt.expectedGet) {
t.Errorf("unexpected Get result: got %v, want %v", got, tt.expectedGet)
}
// Check SeenTwice result for validator 1
gotTwice := il.SeenTwice(1, 1)
if gotTwice != tt.expectedTwice {
t.Errorf("unexpected SeenTwice result: got %v, want %v", gotTwice, tt.expectedTwice)
}
})
}
}
// compareTransactions compares two slices of byte slices for equality.
func compareTransactions(a, b [][]byte) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if !bytes.Equal(a[i], b[i]) {
return false
}
}
return true
}

View File

@@ -67,30 +67,6 @@ func (s *SyncCommitteeCache) Clear() {
s.cache = cache.NewFIFO(keyFn)
}
// CurrentPeriodPositions returns the current period positions of validator indices with respect to
// the sync committee. If any input validator index has no assignment, an empty list will be returned
// for that validator. If the input root does not exist in the cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manually checking the state for the index position is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.positionsInCommittee(root, indices)
if err != nil {
return nil, err
}
result := make([][]primitives.CommitteeIndex, len(pos))
for i, p := range pos {
if p == nil {
result[i] = []primitives.CommitteeIndex{}
} else {
result[i] = p.currentPeriod
}
}
return result, nil
}
// CurrentPeriodIndexPosition returns the current period index position of a validator index with respect to
// the sync committee. If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
@@ -128,7 +104,11 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
return pos.nextPeriod, nil
}
func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return the
// position(s) of a validator index in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
obj, exists, err := s.cache.GetByKey(key(root))
if err != nil {
return nil, err
@@ -141,33 +121,13 @@ func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primi
if !ok {
return nil, errNotSyncCommitteeIndexPosition
}
result := make([]*positionInCommittee, len(indices))
for i, idx := range indices {
idxInCommittee, ok := item.vIndexToPositionMap[idx]
if ok {
SyncCommitteeCacheHit.Inc()
result[i] = idxInCommittee
} else {
SyncCommitteeCacheMiss.Inc()
result[i] = nil
}
}
return result, nil
}
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return the
// position(s) of a validator index in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
if err != nil {
return nil, err
}
if len(positions) == 0 {
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
if !ok {
SyncCommitteeCacheMiss.Inc()
return nil, nil
}
return positions[0], nil
SyncCommitteeCacheHit.Inc()
return idxInCommittee, nil
}
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to

View File

@@ -16,11 +16,6 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}
// CurrentPeriodPositions -- fake
func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
return nil, nil
}
// CurrentEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
return nil, nil

View File

@@ -41,6 +41,7 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"sort"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
@@ -40,11 +39,11 @@ func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
if err != nil {
return nil, err
}
@@ -57,7 +56,7 @@ func ProcessAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
@@ -76,9 +75,10 @@ func ProcessAttesterSlashing(
return nil, err
}
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
}
slashedAny = true
}

View File

@@ -4,7 +4,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -45,10 +44,11 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
Target: &ethpb.Checkpoint{Epoch: 1}},
})}}
var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{{}},
Validators: registry,
Slot: currentSlot,
})
require.NoError(t, err)
@@ -62,15 +62,16 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
assert.ErrorContains(t, "attestations are not slashable", err)
}
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{{}},
Validators: registry,
Slot: currentSlot,
})
require.NoError(t, err)
@@ -100,7 +101,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}
@@ -242,7 +243,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, tc.st.SetSlot(currentSlot))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
@@ -264,83 +265,3 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
})
}
}
func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
st, keys := util.DeterministicGenesisStateElectra(t, 8)
bal, err := helpers.TotalActiveBalance(st)
require.NoError(t, err)
perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
vals := st.Validators()
// We set the total effective balance of slashed validators
// higher than the churn limit for a single epoch.
vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
require.NoError(t, st.SetValidators(vals))
sl1att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
sl1att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{0, 1},
})
slashing1 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl1att1,
Attestation_2: sl1att2,
}
sl2att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{2, 3},
})
sl2att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{2, 3},
})
slashing2 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl2att1,
Attestation_2: sl2att2,
}
domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := keys[0].Sign(signingRoot[:])
sig1 := keys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att1.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[0].Sign(signingRoot[:])
sig1 = keys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att2.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att1.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att2.Signature = aggregateSig.Marshal()
exitInfo := v.ExitInformation(st)
assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
_, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
require.NoError(t, err)
assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
}
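
For orientation, a rough sanity check on the expected epoch in this test, assuming mainnet parameters (MAX_SEED_LOOKAHEAD = 4) and Electra's balance-based exit churn:

    earliest exit epoch   = current_epoch + 1 + MAX_SEED_LOOKAHEAD = 0 + 1 + 4 = 5
    total exiting balance = 4 * (perEpochChurn / 3) ≈ 1.33 * perEpochChurn

The exiting balance exceeds one epoch's churn limit, so the exit queue spills into the next epoch, which is why HighestExitEpoch lands on 6.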

View File

@@ -191,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
}
@@ -224,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -334,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
}
@@ -351,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
}

View File

@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
if !newRegistry[expectedSlashedVal].Slashed {

View File

@@ -9,6 +9,7 @@ import (
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -49,12 +50,13 @@ func ProcessVoluntaryExits(
ctx context.Context,
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
// Avoid calculating the epoch churn if no exits exist.
if len(exits) == 0 {
return beaconState, nil
}
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -66,8 +68,15 @@ func ProcessVoluntaryExits(
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
return nil, err
}
}
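
The bookkeeping in this loop reduces to maintaining a running maximum exit epoch and a counter of exits that landed on it. A minimal sketch of the same invariant in isolation (the types mirror the code above; this is not an actual Prysm helper):

    // trackExit folds a newly assigned exitEpoch into the running
    // (maxExitEpoch, churn) pair. churn counts exits already queued
    // at maxExitEpoch.
    func trackExit(maxExitEpoch, exitEpoch primitives.Epoch, churn uint64) (primitives.Epoch, uint64) {
        if exitEpoch > maxExitEpoch {
            return exitEpoch, 1 // a later epoch resets the counter
        }
        if exitEpoch == maxExitEpoch {
            return maxExitEpoch, churn + 1
        }
        return maxExitEpoch, churn // earlier epochs leave the queue tail untouched
    }

A strictly larger exit epoch resets the per-epoch churn to 1 and an equal one increments it, matching the branch structure in ProcessVoluntaryExits exactly.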

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -47,7 +46,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
}
want := "validator has not been active long enough to exit"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
assert.ErrorContains(t, want, err)
}
@@ -77,7 +76,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
}
want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
assert.ErrorContains(t, want, err)
}
@@ -125,7 +124,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
},
}
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
require.NoError(t, err, "Could not process exits")
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {

View File

@@ -184,54 +184,45 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateFulu:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{
Block: electraGenesisBlock(root),
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
}
}

View File

@@ -7,9 +7,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -19,6 +19,11 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")
type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)
// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
@@ -49,11 +54,11 @@ func ProcessProposerSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
if err != nil {
return nil, err
}
@@ -66,7 +71,7 @@ func ProcessProposerSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
if slashing == nil {
@@ -75,7 +80,7 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}
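
The slashValidatorFunc parameter makes the slashing implementation injectable. A hedged usage sketch, assuming v aliases the core validators package as in the tests elsewhere in this diff (the stub is illustrative):

    // Production path: pass the real implementation.
    st, err := ProcessProposerSlashings(ctx, st, slashings, v.SlashValidator)

    // Test path: any function with the same signature can stand in.
    noopSlash := func(ctx context.Context, st state.BeaconState, vid primitives.ValidatorIndex) (state.BeaconState, error) {
        return st, nil // verify-only: no balances are touched
    }
    st, err = ProcessProposerSlashings(ctx, st, slashings, noopSlash)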

View File

@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
},
}
want := "mismatched header slots"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
},
}
want := "expected slashing headers to differ"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
"validator with key %#x is not slashable",
bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -100,7 +101,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := params.Fork(currentEpoch)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}

View File

@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
// Handle validator ejections.
for _, idx := range eligibleForEjection {
var err error
// exit info is not used in electra
st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
// exitQueueEpoch and churn arguments are not used in electra.
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -47,21 +46,18 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
func ProcessOperations(
ctx context.Context,
st state.BeaconState,
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
// EIP-6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -72,7 +68,7 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}

View File

@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
return nil, err
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
continue
}
vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
if !exists {
log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
continue
}
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
wc := validator.GetWithdrawalCredentials()
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
if !hasCorrectCredential || !isCorrectSourceAddress {
log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
continue
}
// Verify the validator is active.
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
log.Debug("Skipping execution layer withdrawal request, validator not active")
log.Debugln("Skipping execution layer withdrawal request, validator not active")
continue
}
// Verify the validator has not yet submitted an exit.
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
continue
}
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
continue
}
@@ -147,8 +147,9 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
if isFullExitRequest {
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
var err error
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
if err != nil {
return nil, err
}

View File

@@ -99,7 +99,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
for _, idx := range eligibleForEjection {
// It is fine to do a quadratic loop here since this should
// barely ever happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}

View File

@@ -46,6 +46,9 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// InclusionListReceived is sent after an inclusion list is received from gossip or RPC.
InclusionListReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -82,6 +85,11 @@ type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob
}
// InclusionListReceivedData is the data sent with InclusionListReceived events.
type InclusionListReceivedData struct {
SignedInclusionList *ethpb.SignedInclusionList
}
// ProposerSlashingReceivedData is the data sent with ProposerSlashingReceived events.
type ProposerSlashingReceivedData struct {
ProposerSlashing *ethpb.ProposerSlashing
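
Publishing and consuming the new event should follow the existing operation-feed pattern. A minimal sketch, assuming the usual Prysm feed wiring (the notifier variable, channel setup, and handler are illustrative):

    // Publish after gossip validation.
    notifier.OperationFeed().Send(&feed.Event{
        Type: operation.InclusionListReceived,
        Data: &operation.InclusionListReceivedData{SignedInclusionList: il},
    })

    // Subscribe and dispatch on the new type.
    ch := make(chan *feed.Event, 1)
    sub := notifier.OperationFeed().Subscribe(ch)
    defer sub.Unsubscribe()
    if ev := <-ch; ev.Type == operation.InclusionListReceived {
        if data, ok := ev.Data.(*operation.InclusionListReceivedData); ok {
            handleInclusionList(data.SignedInclusionList) // illustrative handler
        }
    }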

View File

@@ -16,10 +16,10 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err := electra.ProcessEpoch(ctx, state); err != nil {
return errors.Wrap(err, "could not process epoch in fulu transition")
}
return ProcessProposerLookahead(ctx, state)
return processProposerLookahead(ctx, state)
}
func ProcessProposerLookahead(ctx context.Context, state state.BeaconState) error {
func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
defer span.End()

View File

@@ -15,7 +15,7 @@ import (
)
// UpgradeToFulu takes a generic beacon state and returns it upgraded to the Fulu version.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {

View File

@@ -7,10 +7,10 @@ go_library(
"beacon_committee.go",
"block.go",
"genesis.go",
"inclusion_list.go",
"legacy.go",
"metrics.go",
"randao.go",
"ranges.go",
"rewards_penalties.go",
"shuffle.go",
"sync_committee.go",
@@ -22,6 +22,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -57,7 +58,6 @@ go_test(
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"ranges_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
"sync_committee_test.go",

View File

@@ -272,9 +272,10 @@ func BeaconCommittee(
// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
type CommitteeAssignment struct {
Committee []primitives.ValidatorIndex
AttesterSlot primitives.Slot
CommitteeIndex primitives.CommitteeIndex
Committee []primitives.ValidatorIndex
AttesterSlot primitives.Slot
CommitteeIndex primitives.CommitteeIndex
InclusionListCommitteeSlot primitives.Slot
}
// VerifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
@@ -317,15 +318,23 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
originalStateSlot := state.Slot()
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, err
}
// Determine the proposer index for the current slot.
i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
i, err := BeaconProposerIndex(ctx, state)
if err != nil {
return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
}
// Append the slot to the proposer's assignments.
@@ -334,6 +343,12 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}
// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}
return proposerAssignments, nil
}
@@ -431,6 +446,22 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
}
}
if slots.ToEpoch(slot) >= params.BeaconConfig().Eip7805ForkEpoch {
// Retrieve inclusion list committee assignments for the slot and update the assignments map.
indices, err := GetInclusionListCommittee(ctx, state, slot)
if err != nil {
return nil, errors.Wrap(err, "could not get inclusion list committee")
}
for _, vIndex := range indices {
if _, exists := vals[vIndex]; !exists {
continue
}
if _, exists := assignments[vIndex]; !exists {
assignments[vIndex] = &CommitteeAssignment{}
}
assignments[vIndex].InclusionListCommitteeSlot = slot
}
}
}
return assignments, nil
}
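
A duty-discovery sketch using the extended struct; the field names follow the hunk above, while the surrounding wiring (myIndex, the scheduling call) is illustrative:

    assignments, err := helpers.CommitteeAssignments(ctx, st, epoch, []primitives.ValidatorIndex{myIndex})
    if err != nil {
        return err
    }
    if a, ok := assignments[myIndex]; ok && a.InclusionListCommitteeSlot != 0 {
        // This validator sits on the inclusion list committee at that slot.
        scheduleInclusionListDuty(a.InclusionListCommitteeSlot) // illustrative
    }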

View File

@@ -0,0 +1,107 @@
package helpers
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
)
var (
errNilIl = errors.New("nil inclusion list")
errNilCommitteeRoot = errors.New("nil inclusion list committee root")
errNilSignature = errors.New("nil signature")
errIncorrectState = errors.New("incorrect state version")
)
// ValidateNilSignedInclusionList validates that a SignedInclusionList is not nil and contains a signature.
func ValidateNilSignedInclusionList(il *eth.SignedInclusionList) error {
if il == nil {
return errNilIl
}
if il.Signature == nil {
return errNilSignature
}
return ValidateNilInclusionList(il.Message)
}
// ValidateNilInclusionList validates that an InclusionList is not nil and contains a committee root.
func ValidateNilInclusionList(il *eth.InclusionList) error {
if il == nil {
return errNilIl
}
if il.InclusionListCommitteeRoot == nil {
return errNilCommitteeRoot
}
return nil
}
// GetInclusionListCommittee retrieves the validator indices assigned to the inclusion list committee
// for a given slot. Returns an error if the state or slot does not meet the required constraints.
func GetInclusionListCommittee(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
if slots.ToEpoch(slot) < params.BeaconConfig().Eip7805ForkEpoch {
return nil, errIncorrectState
}
epoch := slots.ToEpoch(slot)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainInclusionListCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not get seed")
}
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, err
}
start := uint64(slot%params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().InclusionListCommitteeSize
end := start + params.BeaconConfig().InclusionListCommitteeSize
shuffledIndices := make([]primitives.ValidatorIndex, len(indices))
copy(shuffledIndices, indices)
shuffledList, err := UnshuffleList(shuffledIndices, seed)
if err != nil {
return nil, err
}
return shuffledList[start:end], nil
}
// ValidateInclusionListSignature verifies the signature on a SignedInclusionList against the public key
// of the validator specified in the inclusion list.
func ValidateInclusionListSignature(ctx context.Context, st state.ReadOnlyBeaconState, il *eth.SignedInclusionList) error {
if err := ValidateNilSignedInclusionList(il); err != nil {
return err
}
val, err := st.ValidatorAtIndex(il.Message.ValidatorIndex)
if err != nil {
return err
}
pub, err := bls.PublicKeyFromBytes(val.PublicKey)
if err != nil {
return err
}
sig, err := bls.SignatureFromBytes(il.Signature)
if err != nil {
return err
}
currentEpoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), currentEpoch, params.BeaconConfig().DomainInclusionListCommittee, st.GenesisValidatorsRoot())
if err != nil {
return err
}
root, err := signing.ComputeSigningRoot(il.Message, domain)
if err != nil {
return err
}
if !sig.Verify(pub, root[:]) {
return signing.ErrSigFailedToVerify
}
return nil
}
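
The committee slice in GetInclusionListCommittee is a fixed per-slot window into the epoch's shuffled validator set. A worked example, assuming SLOTS_PER_EPOCH = 32 and an illustrative INCLUSION_LIST_COMMITTEE_SIZE = 16:

    slot  = 100                    // 100 % 32 = 4, the fifth slot of its epoch
    start = (100 % 32) * 16 = 64
    end   = 64 + 16         = 80

so the committee is shuffledList[64:80]. Each slot in the epoch gets a disjoint window, so inclusion list committees never overlap within an epoch (given enough active validators to cover every window).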

View File

@@ -1,62 +0,0 @@
package helpers
import (
"fmt"
"slices"
)
// SortedSliceFromMap takes a map with uint64 keys and returns a sorted slice of the keys.
func SortedSliceFromMap(toSort map[uint64]bool) []uint64 {
slice := make([]uint64, 0, len(toSort))
for key := range toSort {
slice = append(slice, key)
}
slices.Sort(slice)
return slice
}
// PrettySlice returns a pretty string representation of a sorted slice of uint64.
// `sortedSlice` must be sorted in ascending order.
// Example: [1,2,3,5,6,7,8,10] -> "1-3,5-8,10"
func PrettySlice(sortedSlice []uint64) string {
if len(sortedSlice) == 0 {
return ""
}
var result string
start := sortedSlice[0]
end := sortedSlice[0]
for i := 1; i < len(sortedSlice); i++ {
if sortedSlice[i] == end+1 {
end = sortedSlice[i]
continue
}
if start == end {
result += fmt.Sprintf("%d,", start)
start = sortedSlice[i]
end = sortedSlice[i]
continue
}
result += fmt.Sprintf("%d-%d,", start, end)
start = sortedSlice[i]
end = sortedSlice[i]
}
if start == end {
result += fmt.Sprintf("%d", start)
return result
}
result += fmt.Sprintf("%d-%d", start, end)
return result
}
// SortedPrettySliceFromMap combines SortedSliceFromMap and PrettySlice to return a pretty string representation of the keys in a map.
func SortedPrettySliceFromMap(toSort map[uint64]bool) string {
sorted := SortedSliceFromMap(toSort)
return PrettySlice(sorted)
}

View File

@@ -1,64 +0,0 @@
package helpers_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestSortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 3: true, 8: true, 1: true}
expected := []uint64{1, 3, 5, 8}
actual := helpers.SortedSliceFromMap(input)
require.Equal(t, len(expected), len(actual))
for i := range expected {
require.Equal(t, expected[i], actual[i])
}
}
func TestPrettySlice(t *testing.T) {
tests := []struct {
name string
input []uint64
expected string
}{
{
name: "empty slice",
input: []uint64{},
expected: "",
},
{
name: "only distinct elements",
input: []uint64{1, 3, 5, 7, 9},
expected: "1,3,5,7,9",
},
{
name: "single range",
input: []uint64{1, 2, 3, 4, 5},
expected: "1-5",
},
{
name: "multiple ranges and distinct elements",
input: []uint64{1, 2, 3, 5, 6, 7, 8, 10, 12, 13, 14},
expected: "1-3,5-8,10,12-14",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := helpers.PrettySlice(tt.input)
require.Equal(t, tt.expected, actual)
})
}
}
func TestSortedPrettySliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 7: true, 8: true, 10: true}
expected := "5,7-8,10"
actual := helpers.SortedPrettySliceFromMap(input)
require.Equal(t, expected, actual)
}

View File

@@ -87,11 +87,6 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
return total, nil
}
// UpdateTotalActiveBalanceCache updates the cache with the given total active balance.
func UpdateTotalActiveBalanceCache(s state.BeaconState, total uint64) error {
return balanceCache.AddTotalEffectiveBalance(s, total)
}
// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:

View File

@@ -297,30 +297,3 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
}
}
func TestUpdateTotalActiveBalanceCache(t *testing.T) {
helpers.ClearCache()
// Create a test state with some validators
validators := []*ethpb.Validator{
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 31 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: 0,
})
require.NoError(t, err)
// Test updating cache with a specific total
testTotal := uint64(95 * 1e9) // 32 + 32 + 31 = 95
err = helpers.UpdateTotalActiveBalanceCache(state, testTotal)
require.NoError(t, err)
// Verify the cache was updated by retrieving the total active balance
// which should now return the cached value
cachedTotal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
assert.Equal(t, testTotal, cachedTotal, "Cache should return the updated total")
}

View File

@@ -21,39 +21,6 @@ var (
syncCommitteeCache = cache.NewSyncCommittee()
)
// CurrentPeriodPositions returns committee indices of the current period sync committee for input validators.
func CurrentPeriodPositions(st state.BeaconState, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
pos, err := syncCommitteeCache.CurrentPeriodPositions(root, indices)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
committee, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
log.WithError(err).Error("Could not fill sync committee cache on miss")
}
}()
pos = make([][]primitives.CommitteeIndex, len(indices))
for i, idx := range indices {
pubkey := st.PubkeyAtIndex(idx)
pos[i] = findSubCommitteeIndices(pubkey[:], committee.Pubkeys)
}
return pos, nil
}
if err != nil {
return nil, err
}
return pos, nil
}
// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache

View File

@@ -17,38 +17,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestCurrentPeriodPositions(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys[i] = bytesutil.PadTo(k, 48)
}
state, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee([32]byte{}, state))
positions, err := helpers.CurrentPeriodPositions(state, []primitives.ValidatorIndex{0, 1})
require.NoError(t, err)
require.Equal(t, 2, len(positions))
require.Equal(t, 1, len(positions[0]))
assert.Equal(t, primitives.CommitteeIndex(0), positions[0][0])
require.Equal(t, 1, len(positions[1]))
assert.Equal(t, primitives.CommitteeIndex(1), positions[1][0])
}
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
helpers.ClearCache()
@@ -110,7 +78,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -297,7 +264,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -309,29 +309,23 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
if err != nil {
return 0, errors.Wrap(err, "could not get proposer lookahead")
}
spe := params.BeaconConfig().SlotsPerEpoch
if e == stateEpoch {
return lookAhead[slot%spe], nil
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
}
// The caller is requesting the proposer for the next epoch
return lookAhead[spe+slot%spe], nil
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
}
// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(state.Slot())
// Even if the state is post Fulu, we may request a past proposer index.
if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
// We can use the cached lookahead only for the current and the next epoch.
if e == stateEpoch || e == stateEpoch+1 {
return beaconProposerIndexAtSlotFulu(state, slot)
}
if state.Version() >= version.Fulu {
return beaconProposerIndexAtSlotFulu(state, slot)
}
e := slots.ToEpoch(slot)
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
// For simplicity, the node will skip caching of genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
s, err := slots.EpochEnd(e - 1)
if err != nil {
return 0, err
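
The Fulu lookahead indexing assumes the cached array spans exactly two epochs: the first SLOTS_PER_EPOCH entries cover the current epoch and the next SLOTS_PER_EPOCH entries cover the following one. A worked example consistent with the test below, assuming SLOTS_PER_EPOCH = 32 and a state in epoch 3 (slots 96-127):

    slot 100 (current epoch): index = 100 % 32      = 4
    slot 130 (next epoch):    index = 32 + 130 % 32 = 34

Requests outside those two epochs cannot be served from the lookahead, which is what the added error cases in the test exercise.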

View File

@@ -1161,10 +1161,6 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}
func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
@@ -1184,4 +1180,8 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(42), idx)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}

View File

@@ -206,9 +206,9 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
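
Plugging in the mainnet values gives a quick arithmetic check (MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256, CHURN_LIMIT_QUOTIENT = 65536):

    256 + 65536 / 2 = 256 + 32768 = 33024 epochs ≈ 5 months

which matches the value asserted in the test below.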

View File

@@ -292,7 +292,7 @@ func TestMinEpochsForBlockRequests(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (

View File

@@ -3,20 +3,15 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cache.go",
"helpers.go",
"lightclient.go",
"store.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client",
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
visibility = ["//visibility:public"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -39,30 +34,22 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cache_test.go",
"lightclient_test.go",
"store_test.go",
],
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
":go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -26,12 +26,14 @@ const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than re
func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return nil, err
}
@@ -41,11 +43,13 @@ func NewLightClientFinalityUpdateFromBeaconState(
func NewLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, nil)
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, nil)
if err != nil {
return nil, err
}
@@ -62,6 +66,7 @@ func NewLightClientOptimisticUpdateFromBeaconState(
// if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)
func NewLightClientUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
@@ -516,7 +521,8 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
func BlockToLightClientHeader(
ctx context.Context,
attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block. in case of the latter, we might need to upgrade it to the attested block's version.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block.
// in case of the latter, we might need to upgrade it to the attested block's version.
) (interfaces.LightClientHeader, error) {
if block.Version() > attestedBlockVersion {
return nil, errors.Errorf("block version %s is greater than attested block version %s", version.String(block.Version()), version.String(attestedBlockVersion))
@@ -592,10 +598,6 @@ func HasFinality(update interfaces.LightClientUpdate) (bool, error) {
}
func IsBetterUpdate(newUpdate, oldUpdate interfaces.LightClientUpdate) (bool, error) {
if oldUpdate == nil || oldUpdate.IsNil() {
return true, nil
}
maxActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Len()
newNumActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Count()
oldNumActiveParticipants := oldUpdate.SyncAggregate().SyncCommitteeBits.Count()
@@ -753,64 +755,3 @@ func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
numActiveParticipants := syncAggregate.SyncCommitteeBits.Count()
return numActiveParticipants*3 >= maxActiveParticipants*2
}
// IsFinalityUpdateValidForBroadcast checks if a finality update needs to be broadcasted.
// It is also used to check if an incoming gossiped finality update is valid for forwarding and saving.
func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := UpdateHasSupermajority(newUpdate.SyncAggregate())
lastUpdateSlot := oldUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := UpdateHasSupermajority(oldUpdate.SyncAggregate())
if newUpdateSlot < lastUpdateSlot {
return false
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
return false
}
return true
}
// IsBetterFinalityUpdate checks if the new finality update is better than the old one for saving.
// This does not concern broadcasting, but rather the decision of whether to save the new update.
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil || oldUpdate.IsNil() {
return true
}
// Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
newFinalizedSlot := newUpdate.FinalizedHeader().Beacon().Slot
newAttestedSlot := newUpdate.AttestedHeader().Beacon().Slot
oldFinalizedSlot := oldUpdate.FinalizedHeader().Beacon().Slot
oldAttestedSlot := oldUpdate.AttestedHeader().Beacon().Slot
if newFinalizedSlot < oldFinalizedSlot {
return false
}
if newFinalizedSlot == oldFinalizedSlot {
if newAttestedSlot < oldAttestedSlot {
return false
}
if newAttestedSlot == oldAttestedSlot && newUpdate.SignatureSlot() <= oldUpdate.SignatureSlot() {
return false
}
}
return true
}
func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
if oldUpdate == nil || oldUpdate.IsNil() {
return true
}
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
return newUpdate.AttestedHeader().Beacon().Slot > oldUpdate.AttestedHeader().Beacon().Slot
}

View File

@@ -9,7 +9,7 @@ import (
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/runtime/version"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
consensustypes "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -34,7 +34,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Altair", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
@@ -46,7 +46,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Capella", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -59,7 +59,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Deneb", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -72,7 +72,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Electra", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -97,7 +97,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -132,7 +132,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -206,7 +206,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -240,7 +240,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -315,7 +315,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -392,7 +392,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -467,7 +467,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
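All of these call sites follow the same mechanical migration: the update constructors now take the slot as an explicit second argument. A minimal sketch of the new call shape, where the variable names are illustrative rather than taken from the real call sites:
// Sketch of the migrated signatures; st, blk, attestedState, attestedBlock,
// and finalizedBlock are assumed names, not actual Prysm call sites.
optimistic, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(
	ctx, st.Slot(), st, blk, attestedState, attestedBlock)
finality, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(
	ctx, st.Slot(), st, blk, attestedState, attestedBlock, finalizedBlock)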

View File

@@ -0,0 +1,168 @@
package light_client
import (
"context"
"sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
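// Store provides concurrency-safe access to light client objects. Bootstraps and
// per-period updates are persisted through the beacon database, while the most
// recent finality and optimistic updates are cached in memory only.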
type Store struct {
mu sync.RWMutex
beaconDB iface.HeadAccessDatabase
lastFinalityUpdate interfaces.LightClientFinalityUpdate
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
}
func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
return &Store{
beaconDB: db,
}
}
func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client bootstrap from the database
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
if err != nil {
return nil, err
}
if bootstrap == nil { // not found
return nil, ErrLightClientBootstrapNotFound
}
return bootstrap, nil
}
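// SaveLightClientBootstrap builds a bootstrap from the block and state stored
// under the given root and persists it to the database.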
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()
blk, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
}
if blk == nil {
return errors.Errorf("failed to fetch block for root %x", blockRoot)
}
state, err := s.beaconDB.State(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
}
if state == nil {
return errors.Errorf("failed to fetch state for block root %x", blockRoot)
}
bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
if err != nil {
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
}
// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
}
return nil
}
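// LightClientUpdates returns the stored updates for periods in
// [startPeriod, endPeriod], truncating at the first missing period so that the
// result is always a contiguous range.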
func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client updates for the requested periods from the database
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
if err != nil {
return nil, err
}
var updates []interfaces.LightClientUpdate
for i := startPeriod; i <= endPeriod; i++ {
update, ok := updatesMap[i]
if !ok {
// Only return the first contiguous range of updates
break
}
updates = append(updates, update)
}
return updates, nil
}
func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client update for the given period from the database
update, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return nil, err
}
return update, nil
}
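// SaveLightClientUpdate persists the update for the given period, overwriting
// an existing update only when the new one compares as better.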
func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
s.mu.Lock()
defer s.mu.Unlock()
oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return errors.Wrapf(err, "could not get current light client update")
}
if oldUpdate == nil {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
if err != nil {
return errors.Wrapf(err, "could not compare light client updates")
}
if isNewUpdateBetter {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
return nil
}
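// SetLastFinalityUpdate caches the most recent finality update in memory; it is
// not written to the database.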
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
s.mu.Lock()
defer s.mu.Unlock()
s.lastFinalityUpdate = update
}
func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastFinalityUpdate
}
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate) {
s.mu.Lock()
defer s.mu.Unlock()
s.lastOptimisticUpdate = update
}
func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastOptimisticUpdate
}
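The store persists bootstraps and per-period updates behind the beacon database and holds only the most recent finality and optimistic updates in memory, guarded by a single RWMutex so reads stay cheap while writes are serialized. A minimal usage sketch, assuming the caller already holds an iface.HeadAccessDatabase handle; db, ctx, period, update, startPeriod, and endPeriod are illustrative:
// Illustrative wiring only; all inputs come from the caller.
store := lightClient.NewLightClientStore(db)
if err := store.SaveLightClientUpdate(ctx, period, update); err != nil {
	log.WithError(err).Error("could not save light client update")
}
// Returns at most a contiguous run of updates starting at startPeriod.
updates, err := store.LightClientUpdates(ctx, startPeriod, endPeriod)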

View File

@@ -0,0 +1,68 @@
package light_client_test
import (
"testing"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestLightClientStore(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
params.OverrideBeaconConfig(cfg)
// Initialize an empty light client store; the in-memory update cache needs no database
lcStore := &lightClient.Store{}
// Create test light client updates for Capella and Deneb
lCapella := util.NewTestLightClient(t, version.Capella)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")
lDeneb := util.NewTestLightClient(t, version.Deneb)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")
// Initially the store should have nil values for both updates
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
// Set and get finality with Capella update. Optimistic update should be nil
lcStore.SetLastFinalityUpdate(finUpdateCapella)
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
// Set and get optimistic with Capella update. Finality update should be Capella
lcStore.SetLastOptimisticUpdate(opUpdateCapella)
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
// Set and get finality and optimistic with Deneb update
lcStore.SetLastFinalityUpdate(finUpdateDeneb)
lcStore.SetLastOptimisticUpdate(opUpdateDeneb)
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
// Set and get finality and optimistic with nil update
lcStore.SetLastFinalityUpdate(nil)
lcStore.SetLastOptimisticUpdate(nil)
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
}

Some files were not shown because too many files have changed in this diff.