mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: peerdas-pe...block-prop (173 commits)
| SHA1 |
| --- |
| 73c63a358a |
| 293a37bef1 |
| b99cf362a5 |
| 9b551959c4 |
| 8f9026bed8 |
| 2568e2e087 |
| 62a4fca4d5 |
| 7059cf4cf2 |
| 5c64cb9eb6 |
| 1bf2188f81 |
| d78cb1fd67 |
| 900f162467 |
| 5266d34a22 |
| c941e47b7e |
| 54991bbc52 |
| 7e32bbc199 |
| a1be3d68cd |
| 88af3f90a5 |
| 600169a53b |
| a5e4fccb47 |
| e589588f47 |
| 41884d8d9d |
| db074cbf12 |
| 360e89767f |
| 238d5c07df |
| 2292d955a3 |
| 76bc30e8ba |
| a5c7c6da06 |
| 4b09dd4aa5 |
| 1dab5a9f8a |
| d681232fe6 |
| 1d24f89c96 |
| 967193e6a2 |
| 9e40551852 |
| a8cab58f7e |
| df86f57507 |
| 9b0a3e9632 |
| 5e079aa62c |
| 5c68ec5c39 |
| 5410232bef |
| 3f5c4df7e0 |
| 5c348dff59 |
| 8136ff7c3a |
| f690af81fa |
| 029b896c79 |
| e1117a7de2 |
| 39b2a02f66 |
| 4e8a710b64 |
| 7191a5bcdf |
| d335a52c49 |
| c7401f5e75 |
| 0057cc57b5 |
| b1dc5e485d |
| f035da6fc5 |
| 854f4bc9a3 |
| 1933adedbf |
| 278b796e43 |
| 8e52d0c3c6 |
| d339e09509 |
| 8ec460223c |
| 349d9d2fd0 |
| e0aecb9c32 |
| 4a1ab70929 |
| 240cd1d058 |
| 26d8b6b786 |
| 92c359456e |
| d48ed44c4c |
| dbab624f3d |
| 83e9cc3dbb |
| ee03c7cce2 |
| c5135f6995 |
| 29aedac113 |
| 08fb3812b7 |
| 07738dd9a4 |
| 53df29b07f |
| 00cf1f2507 |
| 6528fb9cea |
| 5021131811 |
| 26cec9d9c7 |
| 4ed90a02ef |
| 7d528c75bb |
| e7b2953d5a |
| acf35e849e |
| c826d334a1 |
| eace128ee9 |
| 9161b80a32 |
| 009a1507a1 |
| fa71a6e117 |
| 3da40ecd9c |
| f7f992c256 |
| 09565e0c3a |
| 05a3736310 |
| 84c8653a52 |
| 921ff23c6b |
| 87235fb384 |
| 2ec5914b4a |
| fe000e5629 |
| 447a3d8add |
| b00aaef202 |
| 0f6070a866 |
| 2a09c9f681 |
| 9c6ccd67c1 |
| 36e5d4926b |
| 17b7d3ff12 |
| fb2bceece8 |
| d012ab653c |
| 46e7555c42 |
| 181df3995e |
| 149e220b61 |
| ae4b982a6c |
| f330021785 |
| bd6b4ecd5b |
| d7d8764a91 |
| 9b7f91d947 |
| 57e27199bd |
| 11ca766ed6 |
| cd6cc76d58 |
| fc4a1469f0 |
| f3dc4c283e |
| 6ddf271688 |
| af7afba26e |
| b740a4ff83 |
| 385c2224e8 |
| 04b39d1a4d |
| 4c40caf7fd |
| bc209cadab |
| 856742ff68 |
| abe16a9cb4 |
| 77958022e7 |
| c21fae239f |
| deb3ba7f21 |
| f288a3c0e1 |
| a4ca6355d0 |
| cd0821d026 |
| 8b53887891 |
| 8623a144d9 |
| f3314d2d24 |
| bcd65e7a4d |
| dce89a1627 |
| 09485c2062 |
| 9e014da0b9 |
| d8fedacc26 |
| 3def16caaa |
| 4e5bfa9760 |
| 70ac53f991 |
| 6f5ff03b42 |
| 499d27b6ae |
| 56e8881bc1 |
| 78f8411ad2 |
| 83943b5dd8 |
| bc7e4f7751 |
| 02f8726e46 |
| 16b567f6af |
| 5c1d827335 |
| 68d7df0e4f |
| 288b33750d |
| f4f48d6372 |
| f2d57f0b5f |
| 7025e50a6c |
| 961ea05454 |
| da5525096c |
| 2d2507b907 |
| a701f07f3a |
| f4bbe5ca40 |
| 4be8de2476 |
| fac509a3e6 |
| b1ac8209b2 |
| 74c9586c66 |
| f0ad3dfaeb |
| 2540196747 |
| f133751cce |
| bddcc158e4 |
| bc7664321b |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)

```diff
@@ -1,6 +1,6 @@
 name: 🐞 Bug report
 description: Report a bug or problem with running Prysm
-labels: ["Bug"]
+type: "Bug"
 body:
   - type: markdown
     attributes:
```
.github/workflows/check-specrefs.yml (vendored, new file, 43 lines)

```diff
@@ -0,0 +1,43 @@
+name: Check Spec References
+on: [push, pull_request]
+
+jobs:
+  check-specrefs:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Check version consistency
+        run: |
+          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
+          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
+          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
+            echo "Version mismatch between WORKSPACE and ethspecify"
+            echo "  WORKSPACE: $WORKSPACE_VERSION"
+            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
+            exit 1
+          else
+            echo "Versions match: $WORKSPACE_VERSION"
+          fi
+
+      - name: Install ethspecify
+        run: python3 -mpip install ethspecify
+
+      - name: Update spec references
+        run: ethspecify process --path=specrefs
+
+      - name: Check for differences
+        run: |
+          if ! git diff --exit-code specrefs >/dev/null; then
+            echo "Spec references are out-of-date!"
+            echo ""
+            git --no-pager diff specrefs
+            exit 1
+          else
+            echo "Spec references are up-to-date!"
+          fi
+
+      - name: Check spec references
+        run: ethspecify check --path=specrefs
```
```diff
@@ -194,6 +194,7 @@ nogo(
     "//tools/analyzers/gocognit:go_default_library",
     "//tools/analyzers/ineffassign:go_default_library",
     "//tools/analyzers/interfacechecker:go_default_library",
+    "//tools/analyzers/logcapitalization:go_default_library",
     "//tools/analyzers/logruswitherror:go_default_library",
     "//tools/analyzers/maligned:go_default_library",
     "//tools/analyzers/nop:go_default_library",
```
```diff
@@ -2993,7 +2993,7 @@ There are two known issues with this release:
 
 ### Added
 
-- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
+- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
   details.
 - Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
 - Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
```
```diff
@@ -2,7 +2,7 @@
 
 Prysm is go project with many complicated dependencies, including some c++ based libraries. There
 are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
-to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
+to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
 understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
 "go build" project.
```
WORKSPACE (14 changes)

```diff
@@ -190,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
 # A multi-arch base image
 oci_pull(
     name = "linux_debian11_multiarch_base",  # Debian bullseye
-    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b",  # 2023-12-12
+    digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
     image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
     platforms = [
         "linux/amd64",
@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.24.0",
+    go_version = "1.24.6",
     nogo = "@//:nogo",
 )
 
@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.6.0-alpha.1"
+consensus_spec_version = "v1.6.0-alpha.6"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
-        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
-        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
+        "general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
+        "minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
+        "mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
     },
     version = consensus_spec_version,
 )
@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
+    integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
```
```diff
@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
-    srcs = ["common.go"],
+    srcs = [
+        "common.go",
+        "header.go",
+    ],
     importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
     visibility = ["//visibility:public"],
-    deps = ["//consensus-types/primitives:go_default_library"],
+    deps = [
+        "//consensus-types/primitives:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
 )
 
 go_test(
     name = "go_default_test",
-    srcs = ["common_test.go"],
+    srcs = [
+        "common_test.go",
+        "header_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//consensus-types/primitives:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
     ],
 )
```
api/apiutil/header.go (new file, 122 lines)

```diff
@@ -0,0 +1,122 @@
+package apiutil
+
+import (
+	"mime"
+	"sort"
+	"strconv"
+	"strings"
+
+	log "github.com/sirupsen/logrus"
+)
+
+type mediaRange struct {
+	mt   string  // canonicalised media‑type, e.g. "application/json"
+	q    float64 // quality factor (0‑1)
+	raw  string  // original string – useful for logging/debugging
+	spec int     // 2=exact, 1=type/*, 0=*/*
+}
+
+func parseMediaRange(field string) (mediaRange, bool) {
+	field = strings.TrimSpace(field)
+
+	mt, params, err := mime.ParseMediaType(field)
+	if err != nil {
+		log.WithError(err).Debug("Failed to parse header field")
+		return mediaRange{}, false
+	}
+
+	r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}
+
+	if qs, ok := params["q"]; ok {
+		v, err := strconv.ParseFloat(qs, 64)
+		if err != nil || v < 0 || v > 1 {
+			log.WithField("q", qs).Debug("Invalid quality factor (0‑1)")
+			return mediaRange{}, false // skip invalid entry
+		}
+		r.q = v
+	}
+
+	switch {
+	case mt == "*/*":
+		r.spec = 0
+	case strings.HasSuffix(mt, "/*"):
+		r.spec = 1
+	}
+	return r, true
+}
+
+func hasExplicitQ(r mediaRange) bool {
+	return strings.Contains(strings.ToLower(r.raw), ";q=")
+}
+
+// ParseAccept returns media ranges sorted by q (desc) then specificity.
+func ParseAccept(header string) []mediaRange {
+	if header == "" {
+		return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
+	}
+
+	var out []mediaRange
+	for _, field := range strings.Split(header, ",") {
+		if r, ok := parseMediaRange(field); ok {
+			out = append(out, r)
+		}
+	}
+
+	sort.SliceStable(out, func(i, j int) bool {
+		ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
+		if ei != ej {
+			return ei // explicit beats implicit
+		}
+		if out[i].q != out[j].q {
+			return out[i].q > out[j].q
+		}
+		return out[i].spec > out[j].spec
+	})
+	return out
+}
+
+// Matches reports whether content type is acceptable per the header.
+func Matches(header, ct string) bool {
+	for _, r := range ParseAccept(header) {
+		switch {
+		case r.q == 0:
+			continue
+		case r.mt == "*/*":
+			return true
+		case strings.HasSuffix(r.mt, "/*"):
+			if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
+				return true
+			}
+		case r.mt == ct:
+			return true
+		}
+	}
+	return false
+}
+
+// Negotiate selects the best server type according to the header.
+// Returns the chosen type and true, or "", false when nothing matches.
+func Negotiate(header string, serverTypes []string) (string, bool) {
+	for _, r := range ParseAccept(header) {
+		if r.q == 0 {
+			continue
+		}
+		for _, s := range serverTypes {
+			if Matches(r.mt, s) {
+				return s, true
+			}
+		}
+	}
+	return "", false
+}
+
+// PrimaryAcceptMatches only checks if the first accept matches
+func PrimaryAcceptMatches(header, produced string) bool {
+	for _, r := range ParseAccept(header) {
+		if r.q == 0 {
+			continue // explicitly unacceptable – skip
+		}
+		return Matches(r.mt, produced)
+	}
+	return false
+}
```
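The negotiation helpers above are compact but easy to misread, so here is a minimal usage sketch. It assumes only the exported functions from api/apiutil/header.go shown in the diff; the `main` wrapper itself is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

func main() {
	// Both ranges carry explicit q factors, so ordering follows q alone.
	accept := "application/octet-stream;q=0.9, application/json;q=0.5"

	// Negotiate returns the best server-supported type for the client's Accept header.
	if mt, ok := apiutil.Negotiate(accept, []string{"application/json", "application/octet-stream"}); ok {
		fmt.Println(mt) // application/octet-stream
	}

	// Matches reports whether a given content type is acceptable at all.
	fmt.Println(apiutil.Matches(accept, "application/json")) // true

	// PrimaryAcceptMatches consults only the highest-priority range.
	fmt.Println(apiutil.PrimaryAcceptMatches(accept, "application/json")) // false
}
```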
api/apiutil/header_test.go (new file, 174 lines)

```diff
@@ -0,0 +1,174 @@
+package apiutil
+
+import (
+	"testing"
+
+	"github.com/OffchainLabs/prysm/v6/testing/require"
+)
+
+func TestParseAccept(t *testing.T) {
+	type want struct {
+		mt   string
+		q    float64
+		spec int
+	}
+
+	cases := []struct {
+		name   string
+		header string
+		want   []want
+	}{
+		{
+			name:   "empty header becomes */*;q=1",
+			header: "",
+			want:   []want{{mt: "*/*", q: 1, spec: 0}},
+		},
+		{
+			name:   "quality ordering then specificity",
+			header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
+			want: []want{
+				{mt: "application/xml", q: 0.5, spec: 2},
+				{mt: "text/*", q: 0.5, spec: 1},
+				{mt: "application/json", q: 0.2, spec: 2},
+				{mt: "*/*", q: 0.1, spec: 0},
+			},
+		},
+		{
+			name:   "invalid pieces are skipped",
+			header: "text/plain; q=boom, application/json",
+			want:   []want{{mt: "application/json", q: 1, spec: 2}},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got := ParseAccept(tc.header)
+			gotProjected := make([]want, len(got))
+			for i, g := range got {
+				gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
+			}
+			require.DeepEqual(t, gotProjected, tc.want)
+		})
+	}
+}
+
+func TestMatches(t *testing.T) {
+	cases := []struct {
+		name    string
+		accept  string
+		ct      string
+		matches bool
+	}{
+		{"exact match", "application/json", "application/json", true},
+		{"type wildcard", "application/*;q=0.8", "application/xml", true},
+		{"global wildcard", "*/*;q=0.1", "image/png", true},
+		{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
+		{"no match", "image/png", "application/json", false},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got := Matches(tc.accept, tc.ct)
+			require.Equal(t, tc.matches, got)
+		})
+	}
+}
+
+func TestNegotiate(t *testing.T) {
+	cases := []struct {
+		name        string
+		accept      string
+		serverTypes []string
+		wantType    string
+		ok          bool
+	}{
+		{
+			name:        "highest quality wins",
+			accept:      "application/json;q=0.8,application/xml;q=0.9",
+			serverTypes: []string{"application/json", "application/xml"},
+			wantType:    "application/xml",
+			ok:          true,
+		},
+		{
+			name:        "wildcard matches first server type",
+			accept:      "*/*;q=0.5",
+			serverTypes: []string{"application/octet-stream", "application/json"},
+			wantType:    "application/octet-stream",
+			ok:          true,
+		},
+		{
+			name:        "no acceptable type",
+			accept:      "image/png",
+			serverTypes: []string{"application/json"},
+			wantType:    "",
+			ok:          false,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got, ok := Negotiate(tc.accept, tc.serverTypes)
+			require.Equal(t, tc.ok, ok)
+			require.Equal(t, tc.wantType, got)
+		})
+	}
+}
+
+func TestPrimaryAcceptMatches(t *testing.T) {
+	tests := []struct {
+		name     string
+		accept   string
+		produced string
+		expect   bool
+	}{
+		{
+			name:     "prefers json",
+			accept:   "application/json;q=0.9,application/xml",
+			produced: "application/json",
+			expect:   true,
+		},
+		{
+			name:     "wildcard application beats other wildcard",
+			accept:   "application/*;q=0.2,*/*;q=0.1",
+			produced: "application/xml",
+			expect:   true,
+		},
+		{
+			name:     "json wins",
+			accept:   "application/xml;q=0.8,application/json;q=0.9",
+			produced: "application/json",
+			expect:   true,
+		},
+		{
+			name:     "json loses",
+			accept:   "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
+			produced: "application/json",
+			expect:   false,
+		},
+		{
+			name:     "json wins with non q option",
+			accept:   "application/xml;q=0.8,image/png,application/json;q=0.9",
+			produced: "application/json",
+			expect:   true,
+		},
+		{
+			name:     "json not primary",
+			accept:   "image/png,application/json",
+			produced: "application/json",
+			expect:   false,
+		},
+		{
+			name:     "absent header",
+			accept:   "",
+			produced: "text/plain",
+			expect:   true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := PrimaryAcceptMatches(tc.accept, tc.produced)
+			require.Equal(t, got, tc.expect)
+		})
+	}
+}
```
```diff
@@ -16,7 +16,6 @@ go_library(
     "//api/server/structs:go_default_library",
     "//consensus-types/primitives:go_default_library",
     "//encoding/bytesutil:go_default_library",
-    "//network/forks:go_default_library",
     "//proto/prysm/v1alpha1:go_default_library",
     "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
     "@com_github_pkg_errors//:go_default_library",
```
```diff
@@ -9,7 +9,6 @@ import (
 	"net/url"
 	"path"
 	"regexp"
 	"sort"
 	"strconv"
 
 	"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/api/server/structs"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/network/forks"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
 	return fr.ToConsensus()
 }
 
-// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
-func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
-	body, err := c.Get(ctx, getForkSchedulePath)
-	if err != nil {
-		return nil, errors.Wrap(err, "error requesting fork schedule")
-	}
-	fsr := &forkScheduleResponse{}
-	err = json.Unmarshal(body, fsr)
-	if err != nil {
-		return nil, err
-	}
-	ofs, err := fsr.OrderedForkSchedule()
-	if err != nil {
-		return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
-	}
-	return ofs, nil
-}
-
 // GetConfigSpec retrieve the current configs of the network used by the beacon node.
 func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
 	body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
 	}
 	return poolResponse, nil
 }
-
-type forkScheduleResponse struct {
-	Data []structs.Fork
-}
-
-func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
-	ofs := make(forks.OrderedSchedule, 0)
-	for _, d := range fsr.Data {
-		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
-		}
-		vSlice, err := hexutil.Decode(d.CurrentVersion)
-		if err != nil {
-			return nil, err
-		}
-		if len(vSlice) != 4 {
-			return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
-		}
-		version := bytesutil.ToBytes4(vSlice)
-		ofs = append(ofs, forks.ForkScheduleEntry{
-			Version: version,
-			Epoch:   primitives.Epoch(epoch),
-		})
-	}
-	sort.Sort(ofs)
-	return ofs, nil
-}
```
@@ -1,20 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"health.go",
|
||||
"interfaces.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["health_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
@@ -1,58 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type NodeHealthTracker struct {
|
||||
isHealthy *bool
|
||||
healthChan chan bool
|
||||
node Node
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewTracker(node Node) Tracker {
|
||||
return &NodeHealthTracker{
|
||||
node: node,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// HealthUpdates provides a read-only channel for health updates.
|
||||
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
|
||||
return n.healthChan
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
|
||||
n.RLock()
|
||||
defer n.RUnlock()
|
||||
if n.isHealthy == nil {
|
||||
return false
|
||||
}
|
||||
return *n.isHealthy
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
newStatus := n.node.IsHealthy(ctx)
|
||||
if n.isHealthy == nil {
|
||||
n.isHealthy = &newStatus
|
||||
}
|
||||
|
||||
isStatusChanged := newStatus != *n.isHealthy
|
||||
if isStatusChanged {
|
||||
// Update the health status
|
||||
n.isHealthy = &newStatus
|
||||
// Send the new status to the health channel, potentially overwriting the existing value
|
||||
select {
|
||||
case <-n.healthChan:
|
||||
n.healthChan <- newStatus
|
||||
default:
|
||||
n.healthChan <- newStatus
|
||||
}
|
||||
}
|
||||
return newStatus
|
||||
}
|
||||
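The deleted CheckHealth relied on a small pattern worth noting: a 1-buffered channel that is drained before each send, so the tracker never blocks on a slow reader and subscribers always observe the most recent status. A standalone sketch of that pattern follows; the function name is illustrative, not from the package.

```go
// publishLatest keeps only the newest value in a 1-buffered channel: drain any
// stale pending value first, then send, so the writer never blocks.
func publishLatest(ch chan bool, v bool) {
	select {
	case <-ch: // discard the pending stale value, if any
	default:
	}
	ch <- v
}
```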
@@ -1,110 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestNodeHealth_IsHealthy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
isHealthy bool
|
||||
want bool
|
||||
}{
|
||||
{"initially healthy", true, true},
|
||||
{"initially unhealthy", false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.isHealthy,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
if got := n.IsHealthy(t.Context()); got != tt.want {
|
||||
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
initial bool // Initial health status
|
||||
newStatus bool // Status to update to
|
||||
shouldSend bool // Should a message be sent through the channel
|
||||
}{
|
||||
{"healthy to unhealthy", true, false, true},
|
||||
{"unhealthy to healthy", false, true, true},
|
||||
{"remain healthy", true, true, false},
|
||||
{"remain unhealthy", false, false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := NewMockHealthClient(ctrl)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.initial,
|
||||
node: client,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
|
||||
s := n.CheckHealth(t.Context())
|
||||
// Check if health status was updated
|
||||
if s != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
|
||||
}
|
||||
|
||||
select {
|
||||
case status := <-n.HealthUpdates():
|
||||
if !tt.shouldSend {
|
||||
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
|
||||
} else if status != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
|
||||
}
|
||||
default:
|
||||
if tt.shouldSend {
|
||||
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := NewMockHealthClient(ctrl)
|
||||
n := NewTracker(client)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Number of goroutines to spawn for both reading and writing
|
||||
numGoroutines := 6
|
||||
|
||||
wg.Add(numGoroutines * 2) // for readers and writers
|
||||
|
||||
// Concurrently update health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
|
||||
n.CheckHealth(t.Context())
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
|
||||
n.CheckHealth(t.Context())
|
||||
}()
|
||||
}
|
||||
|
||||
// Concurrently read health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = n.IsHealthy(t.Context()) // Just read the value
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait() // Wait for all goroutines to finish
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package health
|
||||
|
||||
import "context"
|
||||
|
||||
type Tracker interface {
|
||||
HealthUpdates() <-chan bool
|
||||
CheckHealth(ctx context.Context) bool
|
||||
Node
|
||||
}
|
||||
|
||||
type Node interface {
|
||||
IsHealthy(ctx context.Context) bool
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = Node(&MockHealthClient{})
|
||||
)
|
||||
|
||||
// MockHealthClient is a mock of HealthClient interface.
|
||||
type MockHealthClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockHealthClientMockRecorder
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
|
||||
type MockHealthClientMockRecorder struct {
|
||||
mock *MockHealthClient
|
||||
}
|
||||
|
||||
// IsHealthy mocks base method.
|
||||
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsHealthy", arg0)
|
||||
ret0, ok := ret[0].(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return ret0
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// IsHealthy indicates an expected call of IsHealthy.
|
||||
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
|
||||
mr.mock.Lock()
|
||||
defer mr.mock.Unlock()
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
|
||||
}
|
||||
|
||||
// NewMockHealthClient creates a new mock instance.
|
||||
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
|
||||
mock := &MockHealthClient{ctrl: ctrl}
|
||||
mock.recorder = &MockHealthClientMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
```diff
@@ -50,6 +50,7 @@ go_test(
     "//config/fieldparams:go_default_library",
     "//config/params:go_default_library",
+    "//consensus-types/blocks:go_default_library",
     "//consensus-types/interfaces:go_default_library",
     "//consensus-types/primitives:go_default_library",
     "//encoding/bytesutil:go_default_library",
     "//math:go_default_library",
```
```diff
@@ -30,10 +30,11 @@ import (
 )
 
 const (
-	getExecHeaderPath          = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
-	getStatus                  = "/eth/v1/builder/status"
-	postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
-	postRegisterValidatorPath  = "/eth/v1/builder/validators"
+	getExecHeaderPath            = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
+	getStatus                    = "/eth/v1/builder/status"
+	postBlindedBeaconBlockPath   = "/eth/v1/builder/blinded_blocks"
+	postBlindedBeaconBlockV2Path = "/eth/v2/builder/blinded_blocks"
+	postRegisterValidatorPath    = "/eth/v1/builder/validators"
 )
 
 var (
@@ -72,7 +73,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 		log.WithFields(log.Fields{
 			"bodyBase64": "(nil value)",
 			"url":        r.URL.String(),
-		}).Info("builder http request")
+		}).Info("Builder http request")
 		return nil
 	}
 	t := io.TeeReader(r.Body, b)
@@ -89,7 +90,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	log.WithFields(log.Fields{
 		"bodyBase64": string(body),
 		"url":        r.URL.String(),
-	}).Info("builder http request")
+	}).Info("Builder http request")
 
 	return nil
 }
@@ -101,7 +102,8 @@ type BuilderClient interface {
 	NodeURL() string
 	GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
 	RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
-	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
+	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
+	SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
 	Status(ctx context.Context) error
 }
@@ -152,7 +154,8 @@ func (c *Client) NodeURL() string {
 type reqOption func(*http.Request)
 
 // do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
-func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
+// It validates that the HTTP response status matches the expectedStatus parameter.
+func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
 	ctx, span := trace.StartSpan(ctx, "builder.client.do")
 	defer func() {
 		tracing.AnnotateError(span, err)
@@ -187,8 +190,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
 			log.WithError(closeErr).Error("Failed to close response body")
 		}
 	}()
-	if r.StatusCode != http.StatusOK {
-		err = non200Err(r)
+	if r.StatusCode != expectedStatus {
+		err = unexpectedStatusErr(r, expectedStatus)
 		return
 	}
 	res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
@@ -236,7 +239,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
 			r.Header.Set("Accept", api.JsonMediaType)
 		}
 	}
-	data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
+	data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
 	if err != nil {
 		return nil, errors.Wrap(err, "error getting header from builder server")
 	}
@@ -409,7 +412,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
 		}
 	}
 
-	if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
+	if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
 		return errors.Wrap(err, "do")
 	}
 	log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -446,6 +449,9 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
 var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
 
 func getVersionsBlockToPayload(blockVersion int) (int, error) {
+	if blockVersion >= version.Fulu {
+		return version.Fulu, nil
+	}
 	if blockVersion >= version.Deneb {
 		return version.Deneb, nil
 	}
@@ -460,7 +466,7 @@ func getVersionsBlockToPayload(blockVersion int) (int, error) {
 
 // SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
 // The response is the full execution payload used to create the blinded block.
-func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	body, postOpts, err := c.buildBlindedBlockRequest(sb)
 	if err != nil {
 		return nil, nil, err
@@ -468,7 +474,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 
 	// post the blinded block - the execution payload response should contain the unblinded payload, along with the
 	// blobs bundle if it is post deneb.
-	data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
+	data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
 	}
@@ -498,6 +504,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 	return ed, blobs, nil
 }
 
+// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
+// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
+func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
+	body, postOpts, err := c.buildBlindedBlockRequest(sb)
+	if err != nil {
+		return err
+	}
+
+	// Post the blinded block - the response should only contain a status code (no payload)
+	_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockV2Path, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
+	if err != nil {
+		return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
+	}
+
+	// Success is indicated by no error (status 202)
+	return nil
+}
+
 func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
 	var versionHeader string
 	if c.sszEnabled {
@@ -558,7 +582,7 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
 func (c *Client) parseBlindedBlockResponse(
 	respBytes []byte,
 	forkVersion int,
-) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	if c.sszEnabled {
 		return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
 	}
@@ -568,8 +592,18 @@ func (c *Client) parseBlindedBlockResponse(
 func (c *Client) parseBlindedBlockResponseSSZ(
 	respBytes []byte,
 	forkVersion int,
-) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
-	if forkVersion >= version.Deneb {
+) (interfaces.ExecutionData, v1.BlobsBundler, error) {
+	if forkVersion >= version.Fulu {
+		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundleV2{}
+		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
+			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundleV2 SSZ")
+		}
+		ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
+		}
+		return ed, payloadAndBlobs.BlobsBundle, nil
+	} else if forkVersion >= version.Deneb {
 		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
 		if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
 			return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
@@ -644,11 +678,11 @@ func (c *Client) Status(ctx context.Context) error {
 	getOpts := func(r *http.Request) {
 		r.Header.Set("Accept", api.JsonMediaType)
 	}
-	_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
+	_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
 	return err
 }
 
-func non200Err(response *http.Response) error {
+func unexpectedStatusErr(response *http.Response, expected int) error {
 	bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
 	var errMessage ErrorMessage
 	var body string
@@ -657,7 +691,7 @@ func non200Err(response *http.Response) error {
 	} else {
 		body = "response body:\n" + string(bodyBytes)
 	}
-	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
+	msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
 	switch response.StatusCode {
 	case http.StatusUnsupportedMediaType:
 		log.WithError(ErrUnsupportedMediaType).Debug(msg)
```
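Callers now have two submission paths: the pre-Fulu SubmitBlindedBlock, which returns the unblinded payload and blobs, and SubmitBlindedBlockPostFulu, which expects only a 202. A hedged sketch of how a caller might branch between them follows; the helper and the fork check via sb.Version() are assumptions for illustration, while the method names come from the BuilderClient interface above.

```go
// Hypothetical caller-side helper, not part of the diff. It assumes
// interfaces.ReadOnlySignedBeaconBlock exposes Version() and that the version
// package lives at runtime/version, both of which hold elsewhere in Prysm.
package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/api/client/builder"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
)

func submitToBuilder(ctx context.Context, c builder.BuilderClient, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
	if sb.Version() >= version.Fulu {
		// Post-Fulu relays reply with 202 Accepted and no payload body.
		return nil, nil, c.SubmitBlindedBlockPostFulu(ctx, sb)
	}
	// Pre-Fulu relays return the unblinded payload (plus blobs after Deneb).
	return c.SubmitBlindedBlock(ctx, sb)
}
```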
```diff
@@ -2,6 +2,7 @@ package builder
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -13,6 +14,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/api/server/structs"
 	"github.com/OffchainLabs/prysm/v6/config/params"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
 	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -1553,6 +1555,89 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
 	}
 }
 
+func TestSubmitBlindedBlockPostFulu(t *testing.T) {
+	ctx := t.Context()
+
+	t.Run("success", func(t *testing.T) {
+		hc := &http.Client{
+			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
+				require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
+				require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
+				require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
+				// Post-Fulu: only return status code, no payload
+				return &http.Response{
+					StatusCode: http.StatusAccepted,
+					Body:       io.NopCloser(bytes.NewBufferString("")),
+					Request:    r.Clone(ctx),
+				}, nil
+			}),
+		}
+		c := &Client{
+			hc:      hc,
+			baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+		}
+		sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+		require.NoError(t, err)
+		err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+		require.NoError(t, err)
+	})
+
+	t.Run("success_ssz", func(t *testing.T) {
+		hc := &http.Client{
+			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
+				require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
+				require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
+				require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
+				// Post-Fulu: only return status code, no payload
+				return &http.Response{
+					StatusCode: http.StatusAccepted,
+					Body:       io.NopCloser(bytes.NewBufferString("")),
+					Request:    r.Clone(ctx),
+				}, nil
+			}),
+		}
+		c := &Client{
+			hc:         hc,
+			baseURL:    &url.URL{Host: "localhost:3500", Scheme: "http"},
+			sszEnabled: true,
+		}
+		sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+		require.NoError(t, err)
+		err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+		require.NoError(t, err)
+	})
+
+	t.Run("error_response", func(t *testing.T) {
+		hc := &http.Client{
+			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+				require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
+				require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
+				message := ErrorMessage{
+					Code:    400,
+					Message: "Bad Request",
+				}
+				resp, err := json.Marshal(message)
+				require.NoError(t, err)
+				return &http.Response{
+					StatusCode: http.StatusBadRequest,
+					Body:       io.NopCloser(bytes.NewBuffer(resp)),
+					Request:    r.Clone(ctx),
+				}, nil
+			}),
+		}
+		c := &Client{
+			hc:      hc,
+			baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+		}
+		sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+		require.NoError(t, err)
+		err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+		require.ErrorIs(t, err, ErrNotOK)
+	})
+}
+
 func TestRequestLogger(t *testing.T) {
 	wo := WithObserver(&requestLogger{})
 	c, err := NewClient("localhost:3500", wo)
@@ -1573,3 +1658,166 @@ func TestRequestLogger(t *testing.T) {
 	err = c.Status(ctx)
 	require.NoError(t, err)
 }
+
+func TestGetVersionsBlockToPayload(t *testing.T) {
+	tests := []struct {
+		name            string
+		blockVersion    int
+		expectedVersion int
+		expectedError   bool
+	}{
+		{
+			name:            "Fulu version",
+			blockVersion:    6, // version.Fulu
+			expectedVersion: 6,
+			expectedError:   false,
+		},
+		{
+			name:            "Deneb version",
+			blockVersion:    4, // version.Deneb
+			expectedVersion: 4,
+			expectedError:   false,
+		},
+		{
+			name:            "Capella version",
+			blockVersion:    3, // version.Capella
+			expectedVersion: 3,
+			expectedError:   false,
+		},
+		{
+			name:            "Bellatrix version",
+			blockVersion:    2, // version.Bellatrix
+			expectedVersion: 2,
+			expectedError:   false,
+		},
+		{
+			name:          "Unsupported version",
+			blockVersion:  0,
+			expectedError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			version, err := getVersionsBlockToPayload(tt.blockVersion)
+			if tt.expectedError {
+				assert.NotNil(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expectedVersion, version)
+			}
+		})
+	}
+}
+
+func TestParseBlindedBlockResponseSSZ_WithBlobsBundleV2(t *testing.T) {
+	c := &Client{sszEnabled: true}
+
+	// Create test payload
+	payload := &v1.ExecutionPayloadDeneb{
+		ParentHash:    make([]byte, 32),
+		FeeRecipient:  make([]byte, 20),
+		StateRoot:     make([]byte, 32),
+		ReceiptsRoot:  make([]byte, 32),
+		LogsBloom:     make([]byte, 256),
+		PrevRandao:    make([]byte, 32),
+		BlockNumber:   123456,
+		GasLimit:      30000000,
+		GasUsed:       21000,
+		Timestamp:     1234567890,
+		ExtraData:     []byte("test-extra-data"),
+		BaseFeePerGas: make([]byte, 32),
+		BlockHash:     make([]byte, 32),
+		Transactions:  [][]byte{},
+		Withdrawals:   []*v1.Withdrawal{},
+		BlobGasUsed:   1024,
+		ExcessBlobGas: 2048,
+	}
+
+	// Create test BlobsBundleV2
+	bundleV2 := &v1.BlobsBundleV2{
+		KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
+		Proofs:         [][]byte{make([]byte, 48), make([]byte, 48)},
+		Blobs:          [][]byte{make([]byte, 131072), make([]byte, 131072)},
+	}
+
+	// Test Fulu version (should use ExecutionPayloadDenebAndBlobsBundleV2)
+	t.Run("Fulu version with BlobsBundleV2", func(t *testing.T) {
+		payloadAndBlobsV2 := &v1.ExecutionPayloadDenebAndBlobsBundleV2{
+			Payload:     payload,
+			BlobsBundle: bundleV2,
+		}
+
+		respBytes, err := payloadAndBlobsV2.MarshalSSZ()
+		require.NoError(t, err)
+
+		ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 6) // version.Fulu
+		require.NoError(t, err)
+		require.NotNil(t, ed)
+		require.NotNil(t, bundle)
+
+		// Verify the bundle is BlobsBundleV2
+		bundleV2Result, ok := bundle.(*v1.BlobsBundleV2)
+		assert.Equal(t, true, ok, "Expected BlobsBundleV2 type")
+		require.Equal(t, len(bundleV2.KzgCommitments), len(bundleV2Result.KzgCommitments))
+		require.Equal(t, len(bundleV2.Proofs), len(bundleV2Result.Proofs))
+		require.Equal(t, len(bundleV2.Blobs), len(bundleV2Result.Blobs))
+	})
+
+	// Test Deneb version (should use regular BlobsBundle)
+	t.Run("Deneb version with regular BlobsBundle", func(t *testing.T) {
+		regularBundle := &v1.BlobsBundle{
+			KzgCommitments: bundleV2.KzgCommitments,
+			Proofs:         bundleV2.Proofs,
+			Blobs:          bundleV2.Blobs,
+		}
+
+		payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{
+			Payload:     payload,
+			BlobsBundle: regularBundle,
+		}
+
+		respBytes, err := payloadAndBlobs.MarshalSSZ()
+		require.NoError(t, err)
+
+		ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 4) // version.Deneb
+		require.NoError(t, err)
+		require.NotNil(t, ed)
+		require.NotNil(t, bundle)
+
+		// Verify the bundle is regular BlobsBundle
+		regularBundleResult, ok := bundle.(*v1.BlobsBundle)
+		assert.Equal(t, true, ok, "Expected BlobsBundle type")
+		require.Equal(t, len(regularBundle.KzgCommitments), len(regularBundleResult.KzgCommitments))
+	})
+
+	// Test invalid SSZ data
+	t.Run("Invalid SSZ data", func(t *testing.T) {
+		invalidBytes := []byte("invalid-ssz-data")
+
+		ed, bundle, err := c.parseBlindedBlockResponseSSZ(invalidBytes, 6)
+		assert.NotNil(t, err)
+		assert.Equal(t, true, ed == nil)
+		assert.Equal(t, true, bundle == nil)
+	})
+}
+
+func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
+	// Note: The full integration test is complex due to version detection logic
+	// The key functionality is tested in the parseBlindedBlockResponseSSZ tests above
+	// and in the mock service tests which verify the interface changes work correctly
+
+	t.Run("Interface signature verification", func(t *testing.T) {
+		// This test verifies that the SubmitBlindedBlock method signature
+		// has been updated to return BlobsBundler interface
+
+		client := &Client{}
+
+		// Verify the method exists with the correct signature
+		// by using reflection or by checking it compiles with the interface
+		var _ func(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) = client.SubmitBlindedBlock
+
+		// This test passes if the signature is correct
+		assert.Equal(t, true, true)
+	})
+}
```
```diff
@@ -41,10 +41,15 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
 }
 
 // SubmitBlindedBlock --
-func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
 	return nil, nil, nil
 }
 
+// SubmitBlindedBlockPostFulu --
+func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
+	return nil
+}
+
 // Status --
 func (MockClient) Status(_ context.Context) error {
 	return nil
```
```diff
@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
 	require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
 }
 
-func TestErrorMessage_non200Err(t *testing.T) {
+func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
 	mockRequest := &http.Request{
 		URL: &url.URL{Path: "example.com"},
 	}
@@ -1779,7 +1779,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := non200Err(tt.args)
+			err := unexpectedStatusErr(tt.args, http.StatusOK)
 			if err != nil && tt.wantMessage != "" {
 				require.ErrorContains(t, tt.wantMessage, err)
 			}
```
```diff
@@ -1,9 +1,8 @@
 package httprest
 
 import (
-	"time"
-
 	"net/http"
+	"time"
 
 	"github.com/OffchainLabs/prysm/v6/api/server/middleware"
 )
```
```diff
@@ -8,7 +8,12 @@ go_library(
     ],
     importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
     visibility = ["//visibility:public"],
-    deps = ["@com_github_rs_cors//:go_default_library"],
+    deps = [
+        "//api:go_default_library",
+        "//api/apiutil:go_default_library",
+        "@com_github_rs_cors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
 )
 
 go_test(
@@ -22,5 +27,6 @@ go_test(
     "//api:go_default_library",
     "//testing/assert:go_default_library",
     "//testing/require:go_default_library",
+    "@com_github_sirupsen_logrus//:go_default_library",
 ],
 )
```
```diff
@@ -1,11 +1,15 @@
 package middleware
 
 import (
+	"compress/gzip"
 	"fmt"
 	"net/http"
 	"strings"
 
+	"github.com/OffchainLabs/prysm/v6/api"
+	"github.com/OffchainLabs/prysm/v6/api/apiutil"
 	"github.com/rs/cors"
+	log "github.com/sirupsen/logrus"
 )
 
 type Middleware func(http.Handler) http.Handler
@@ -71,47 +75,64 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
 func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			acceptHeader := r.Header.Get("Accept")
-			// header is optional and should skip if not provided
-			if acceptHeader == "" {
-				next.ServeHTTP(w, r)
-				return
-			}
-
-			accepted := false
-			acceptTypes := strings.Split(acceptHeader, ",")
-			// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
-			for _, acceptType := range acceptTypes {
-				acceptType = strings.TrimSpace(acceptType)
-				if acceptType == "*/*" {
-					accepted = true
-					break
-				}
-				for _, serverAcceptedType := range serverAcceptedTypes {
-					if strings.HasPrefix(acceptType, serverAcceptedType) {
-						accepted = true
-						break
-					}
-					if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
-						accepted = true
-						break
-					}
-				}
-				if accepted {
-					break
-				}
-			}
-
-			if !accepted {
-				http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
-				return
-			}
-
-			next.ServeHTTP(w, r)
+			if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
+				http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
+				return
+			}
+			next.ServeHTTP(w, r)
 		})
 	}
 }
 
+// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
+func AcceptEncodingHeaderHandler() Middleware {
+	return func(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+				next.ServeHTTP(w, r)
+				return
+			}
+
+			gz := gzip.NewWriter(w)
+			gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
+			defer func() {
+				if !gzipRW.zip {
+					return
+				}
+				if err := gz.Close(); err != nil {
+					log.WithError(err).Error("Failed to close gzip writer")
+				}
+			}()
+
+			next.ServeHTTP(gzipRW, r)
+		})
+	}
+}
+
+type gzipResponseWriter struct {
+	gz *gzip.Writer
+	http.ResponseWriter
+	zip bool
+}
+
+func (g *gzipResponseWriter) WriteHeader(statusCode int) {
+	if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
+		// Removing the current Content-Length because zipping will change it.
+		g.Header().Del("Content-Length")
+		g.Header().Set("Content-Encoding", "gzip")
+		g.zip = true
+	}
+
+	g.ResponseWriter.WriteHeader(statusCode)
+}
+
+func (g *gzipResponseWriter) Write(b []byte) (int, error) {
+	if g.zip {
+		return g.gz.Write(b)
+	}
+	return g.ResponseWriter.Write(b)
+}
+
 func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
 	if len(mw) < 1 {
 		return h
```
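A minimal wiring sketch for the new middleware, assuming only the exported names above (MiddlewareChain, AcceptHeaderHandler, AcceptEncodingHeaderHandler); the mux and route are hypothetical. Note that gzipResponseWriter only compresses when the Content-Type contains the JSON media type, so SSZ responses pass through untouched.

```go
package main

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v6/api/server/middleware"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/eth/v1/node/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(`{"status":"ok"}`))
	})

	// Reject unacceptable Accept headers first, then gzip JSON responses
	// for clients that advertise gzip support.
	handler := middleware.MiddlewareChain(mux, []middleware.Middleware{
		middleware.AcceptHeaderHandler([]string{"application/json", "application/octet-stream"}),
		middleware.AcceptEncodingHeaderHandler(),
	})
	_ = http.ListenAndServe(":8080", handler)
}
```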
@@ -1,14 +1,35 @@
package middleware

import (
	"bytes"
	"compress/gzip"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	log "github.com/sirupsen/logrus"
)

// frozenHeaderRecorder allows asserting that response headers were not modified
// after the call to WriteHeader.
//
// Its purpose is to have a regression test for https://github.com/OffchainLabs/prysm/pull/15499.
type frozenHeaderRecorder struct {
	*httptest.ResponseRecorder
	frozenHeader http.Header
}

func (r *frozenHeaderRecorder) WriteHeader(code int) {
	if r.frozenHeader != nil {
		return
	}
	r.ResponseRecorder.WriteHeader(code)
	r.frozenHeader = r.ResponseRecorder.Header().Clone()
}

func TestNormalizeQueryValuesHandler(t *testing.T) {
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))
@@ -124,6 +145,90 @@ func TestContentTypeHandler(t *testing.T) {
	}
}

func TestAcceptEncodingHeaderHandler(t *testing.T) {
	dummyContent := "Test gzip middleware content"
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", r.Header.Get("Accept"))
		w.WriteHeader(http.StatusOK)
		_, err := w.Write([]byte(dummyContent))
		require.NoError(t, err)
	})

	handler := AcceptEncodingHeaderHandler()(nextHandler)

	tests := []struct {
		name             string
		accept           string
		acceptEncoding   string
		expectCompressed bool
	}{
		{
			name:             "Accept gzip",
			accept:           api.JsonMediaType,
			acceptEncoding:   "gzip",
			expectCompressed: true,
		},
		{
			name:             "Accept multiple encodings",
			accept:           api.JsonMediaType,
			acceptEncoding:   "deflate, gzip",
			expectCompressed: true,
		},
		{
			name:             "Accept unsupported encoding",
			accept:           api.JsonMediaType,
			acceptEncoding:   "deflate",
			expectCompressed: false,
		},
		{
			name:             "No accept encoding header",
			accept:           api.JsonMediaType,
			acceptEncoding:   "",
			expectCompressed: false,
		},
		{
			name:             "SSZ",
			accept:           api.OctetStreamMediaType,
			acceptEncoding:   "gzip",
			expectCompressed: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "/", nil)
			req.Header.Set("Accept", tt.accept)
			if tt.acceptEncoding != "" {
				req.Header.Set("Accept-Encoding", tt.acceptEncoding)
			}
			rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}

			handler.ServeHTTP(rr, req)

			if tt.expectCompressed {
				require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")

				compressedBody := rr.Body.Bytes()
				require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")

				gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
				require.NoError(t, err, "Failed to create gzip reader")
				defer func() {
					if err := gzReader.Close(); err != nil {
						log.WithError(err).Error("Failed to close gzip reader")
					}
				}()

				decompressedBody, err := io.ReadAll(gzReader)
				require.NoError(t, err, "Failed to decompress response body")
				require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
			} else {
				require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
			}
		})
	}
}

func TestAcceptHeaderHandler(t *testing.T) {
	acceptedTypes := []string{"application/json", "application/octet-stream"}

@@ -36,6 +36,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//container/slice:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",

@@ -10,6 +10,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
	"github.com/OffchainLabs/prysm/v6/container/slice"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/math"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
	// Add validation to check if the signature is valid BLS format
	_, err = bls.SignatureFromBytes(sig)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}

	return &eth.SyncCommitteeMessage{
		Slot: primitives.Slot(slot),

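// Editor's sketch (not part of the diff): bls.SignatureFromBytes deserializes the
// bytes as a BLS signature, so the added call rejects malformed input at decode time
// instead of failing later during verification. Illustration only; sig is the decoded
// signature bytes from the surrounding function:
//
//	if _, err := bls.SignatureFromBytes(sig); err != nil {
//		return nil, server.NewDecodeError(err, "Signature") // not a valid BLS signature encoding
//	}
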
@@ -74,7 +74,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
	}

	return &BeaconState{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -177,7 +177,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
	}

	return &BeaconStateAltair{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -295,7 +295,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
	}

	return &BeaconStateBellatrix{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -430,7 +430,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
	}

	return &BeaconStateCapella{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -568,7 +568,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
	}

	return &BeaconStateDeneb{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -742,7 +742,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
	}

	return &BeaconStateElectra{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),
@@ -932,7 +932,7 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
		lookahead[i] = fmt.Sprintf("%d", uint64(v))
	}
	return &BeaconStateFulu{
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
		GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
		GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
		Slot: fmt.Sprintf("%d", st.Slot()),
		Fork: ForkFromConsensus(st.Fork()),

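// Editor's note (not part of the diff): st.GenesisTime() now returns a time.Time, so
// the API string is built from its Unix seconds. A minimal sketch with an illustrative
// timestamp:
//
//	gt := time.Unix(1606824023, 0)
//	s := fmt.Sprintf("%d", gt.Unix()) // "1606824023", the decimal string the API encodes
//	// fmt.Sprintf("%d", gt) would format the struct, not the timestamp.
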
@@ -283,3 +283,16 @@ type GetPendingPartialWithdrawalsResponse struct {
	Finalized bool `json:"finalized"`
	Data []*PendingPartialWithdrawal `json:"data"`
}

type GetProposerLookaheadResponse struct {
	Version string `json:"version"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
	Data []string `json:"data"` // validator indexes
}

type GetBlobsResponse struct {
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
	Data []string `json:"data"` // blobs
}

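// Editor's sketch (not part of the diff): the JSON wire shape implied by the struct
// tags of GetProposerLookaheadResponse above; all values are hypothetical:
//
//	{
//	  "version": "fulu",
//	  "execution_optimistic": false,
//	  "finalized": true,
//	  "data": ["1021", "455", "3"]
//	}
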
@@ -27,6 +27,8 @@ type Identity struct {
type Metadata struct {
	SeqNumber string `json:"seq_number"`
	Attnets string `json:"attnets"`
	Syncnets string `json:"syncnets,omitempty"`
	Cgc string `json:"custody_group_count,omitempty"`
}

type GetPeerResponse struct {

@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
	for {
		select {
		case <-ticker.C:
			log.WithField("function", funcName).Trace("running")
			log.WithField("function", funcName).Trace("Running")
			f()
		case <-ctx.Done():
			log.WithField("function", funcName).Debug("context is closed, exiting")
			log.WithField("function", funcName).Debug("Context is closed, exiting")
			ticker.Stop()
			return
		}

@@ -27,7 +27,7 @@ go_library(
        "receive_block.go",
        "receive_data_column.go",
        "service.go",
        "setup_forchoice.go",
        "setup_forkchoice.go",
        "tracked_proposer.go",
        "weak_subjectivity_checks.go",
    ],

@@ -50,7 +50,6 @@ go_library(
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
@@ -63,6 +62,7 @@ go_library(
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/blstoexec:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
@@ -73,6 +73,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -147,7 +148,6 @@ go_test(
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -160,6 +160,7 @@ go_test(
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/attestations/kv:go_default_library",
        "//beacon-chain/operations/blstoexec:go_default_library",
@@ -181,6 +182,7 @@ go_test(
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//genesis:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -194,6 +196,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",

@@ -41,7 +41,7 @@ type ForkchoiceFetcher interface {
	Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
	CachedHeadRoot() [32]byte
	GetProposerHead() [32]byte
	SetForkChoiceGenesisTime(uint64)
	SetForkChoiceGenesisTime(time.Time)
	UpdateHead(context.Context, primitives.Slot)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
@@ -514,7 +514,7 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo

// SetGenesisTime sets the genesis time of the beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
	s.genesisTime = t
	s.genesisTime = t.Truncate(time.Second) // Genesis time has a precision of 1 second.
}

func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {

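// Editor's sketch (not part of the diff): Truncate(time.Second) drops any sub-second
// component, keeping genesis-relative slot arithmetic exact:
//
//	t := time.Unix(1606824023, 500_000_000) // wall clock with 0.5s of sub-second noise
//	g := t.Truncate(time.Second)            // exactly 1606824023s, the stored genesis time
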
@@ -2,6 +2,7 @@ package blockchain

import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	consensus_blocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -27,7 +28,7 @@ func (s *Service) GetProposerHead() [32]byte {
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)

@@ -3,9 +3,6 @@ package blockchain

import (
	"testing"

	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -14,10 +11,7 @@ import (
)

func TestHeadSlot_DataRace(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	s := &Service{
		cfg: &config{BeaconDB: beaconDB},
	}
	s := testServiceWithDB(t)
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	st, _ := util.DeterministicGenesisState(t, 1)
@@ -31,11 +25,8 @@ func TestHeadSlot_DataRace(t *testing.T) {
}

func TestHeadRoot_DataRace(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	s := &Service{
		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
		head: &head{root: [32]byte{'A'}},
	}
	s := testServiceWithDB(t)
	s.head = &head{root: [32]byte{'A'}}
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	wait := make(chan struct{})
@@ -51,13 +42,10 @@ func TestHeadRoot_DataRace(t *testing.T) {
}

func TestHeadBlock_DataRace(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
	require.NoError(t, err)
	s := &Service{
		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
		head: &head{block: wsb},
	}
	s := testServiceWithDB(t)
	s.head = &head{block: wsb}
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	wait := make(chan struct{})
@@ -73,10 +61,8 @@ func TestHeadBlock_DataRace(t *testing.T) {
}

func TestHeadState_DataRace(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	s := &Service{
		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
	}
	s := testServiceWithDB(t)
	beaconDB := s.cfg.BeaconDB
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	wait := make(chan struct{})

@@ -6,7 +6,6 @@ import (
	"time"

	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
@@ -15,6 +14,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/genesis"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -138,7 +138,7 @@ func TestFinalizedBlockHash(t *testing.T) {

func TestUnrealizedJustifiedBlockHash(t *testing.T) {
	ctx := t.Context()
	service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	service := testServiceWithDB(t)
	ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
	ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -153,7 +153,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
	c := &Service{}
	c := testServiceNoDB(t)
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -200,7 +200,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
	require.NoError(t, err)
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{block: wsb, state: s}

	received, err := c.HeadBlock(t.Context())
@@ -213,7 +213,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
func TestHeadState_CanRetrieve(t *testing.T) {
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
	require.NoError(t, err)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{state: s}
	headState, err := c.HeadState(t.Context())
	require.NoError(t, err)
@@ -221,7 +221,8 @@ func TestHeadState_CanRetrieve(t *testing.T) {
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
	c := &Service{genesisTime: time.Unix(999, 0)}
	c := testServiceNoDB(t)
	c.genesisTime = time.Unix(999, 0)
	wanted := time.Unix(999, 0)
	assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}
@@ -230,7 +231,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
	f := &ethpb.Fork{Epoch: 999}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
	require.NoError(t, err)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{state: s}
	if !proto.Equal(c.CurrentFork(), f) {
		t.Error("Received incorrect fork version")
@@ -242,7 +243,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
		PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		CurrentVersion: params.BeaconConfig().GenesisForkVersion,
	}
	c := &Service{}
	c := testServiceNoDB(t)
	if !proto.Equal(c.CurrentFork(), f) {
		t.Error("Received incorrect fork version")
	}
@@ -250,7 +251,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {

func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
	// Should not panic if head state is nil.
	c := &Service{}
	c := testServiceNoDB(t)
	assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")

	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
@@ -269,7 +270,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
	d := &ethpb.Eth1Data{DepositCount: 999}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
	require.NoError(t, err)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{state: s}
	if !proto.Equal(c.HeadETH1Data(), d) {
		t.Error("Received incorrect eth1 data")
@@ -298,7 +299,7 @@ func TestIsCanonical_Ok(t *testing.T) {

func TestService_HeadValidatorsIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := &Service{}
	c := testServiceNoDB(t)

	c.head = &head{}
	indices, err := c.HeadValidatorsIndices(t.Context(), 0)
@@ -313,7 +314,7 @@ func TestService_HeadValidatorsIndices(t *testing.T) {

func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 1)
	c := &Service{}
	c := testServiceNoDB(t)

	c.head = &head{}
	root := c.HeadGenesisValidatorsRoot()
@@ -332,7 +333,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {

func TestService_ChainHeads(t *testing.T) {
	ctx := t.Context()
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	c := testServiceWithDB(t)
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -366,7 +367,7 @@ func TestService_ChainHeads(t *testing.T) {

func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{state: s}

	_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -381,7 +382,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
}

func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = nil

	idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -396,7 +397,7 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {

func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
	s, _ := util.DeterministicGenesisState(t, 10)
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = &head{state: s}

	p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
@@ -409,7 +410,7 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
}

func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
	c := &Service{}
	c := testServiceNoDB(t)
	c.head = nil

	p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
@@ -431,7 +432,9 @@ func TestService_IsOptimistic(t *testing.T) {
	ctx := t.Context()
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c := testServiceWithDB(t)
	c.SetGenesisTime(time.Now())
	c.head = &head{root: [32]byte{'b'}}
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
@@ -450,14 +453,15 @@ func TestService_IsOptimistic(t *testing.T) {
	require.Equal(t, true, opt)

	// If head is nil, for some reason, an error should be returned rather than panic.
	c = &Service{}
	c = testServiceNoDB(t)
	_, err = c.IsOptimistic(ctx)
	require.ErrorIs(t, err, ErrNilHead)
}

func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
	ctx := t.Context()
	c := &Service{genesisTime: time.Now()}
	c := testServiceNoDB(t)
	c.genesisTime = time.Now()
	opt, err := c.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, false, opt)
@@ -465,7 +469,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {

func TestService_IsOptimisticForRoot(t *testing.T) {
	ctx := t.Context()
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c := testServiceWithDB(t)
	c.head = &head{root: [32]byte{'b'}}
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -481,9 +486,10 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
}

func TestService_IsOptimisticForRoot_DB(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c := testServiceWithDB(t)
	c.head = &head{root: [32]byte{'b'}}
	beaconDB := c.cfg.BeaconDB
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
@@ -534,9 +540,9 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
}

func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
@@ -573,9 +579,9 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}

func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
@@ -593,9 +599,9 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
}

func TestService_IsFinalized(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
	c := testServiceWithDB(t)
	beaconDB := c.cfg.BeaconDB
	r1 := [32]byte{'a'}
	require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
		Root: r1,
@@ -619,6 +625,7 @@ func Test_hashForGenesisRoot(t *testing.T) {
	ctx := t.Context()
	c := setupBeaconChain(t, beaconDB)
	st, _ := util.DeterministicGenesisStateElectra(t, 10)
	genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
	require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
	root, err := beaconDB.GenesisBlockRoot(ctx)
	require.NoError(t, err)

@@ -7,10 +7,15 @@ type currentlySyncingBlock struct {
	roots map[[32]byte]struct{}
}

func (b *currentlySyncingBlock) set(root [32]byte) {
func (b *currentlySyncingBlock) set(root [32]byte) error {
	b.Lock()
	defer b.Unlock()
	_, ok := b.roots[root]
	if ok {
		return errBlockBeingSynced
	}
	b.roots[root] = struct{}{}
	return nil
}

func (b *currentlySyncingBlock) unset(root [32]byte) {

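// Editor's sketch (not part of the diff): with set returning an error, a caller can
// bail out of a duplicate sync attempt and release the root when done. The field name
// blockBeingSynced is assumed for illustration:
//
//	if err := s.blockBeingSynced.set(root); err != nil {
//		return err // errBlockBeingSynced: this root is already being processed
//	}
//	defer s.blockBeingSynced.unset(root)
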
@@ -2,7 +2,6 @@ package blockchain

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
	"github.com/OffchainLabs/prysm/v6/time"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -17,9 +16,6 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
// a higher number of fragmented indexes are reallocated to a new separate slice for
// that field.
func (s *Service) defragmentState(st state.BeaconState) {
	if !features.Get().EnableExperimentalState {
		return
	}
	startTime := time.Now()
	st.Defragment()
	elapsedTime := time.Since(startTime)

@@ -44,6 +44,8 @@ var (
	errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
	// errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
	errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
	// errBlockBeingSynced is returned when a block is already being synced.
	errBlockBeingSynced = errors.New("block is being synced")
)

// An invalid block is a block that fails state transition based on the core protocol rules.

@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	}

	if err := s.saveHead(ctx, r, b, st); err != nil {
		log.WithError(err).Error("could not save head after pruning invalid blocks")
		log.WithError(err).Error("Could not save head after pruning invalid blocks")
	}

	log.WithFields(logrus.Fields{
@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
			"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
		}).Info("Forkchoice updated with payload attributes for proposal")
		s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
		go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
	} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
		log.WithFields(logrus.Fields{
			"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
@@ -354,7 +355,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
	}

	// Get timestamp.
	t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
	t, err := slots.StartTime(s.genesisTime, slot)
	if err != nil {
		log.WithError(err).Error("Could not get timestamp to get payload attribute")
		return emptyAttri

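// Editor's sketch (not part of the diff): slots.StartTime is assumed to derive a
// slot's wall-clock start from a time.Time genesis, replacing the uint64-seconds
// based slots.ToTime. Conceptually:
//
//	start := genesis.Add(time.Duration(slot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
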
@@ -19,6 +19,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/genesis"
	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
		block: wba,
	}

	genesis.StoreStateDuringTest(t, st)
	require.NoError(t, beaconDB.SaveState(ctx, st, bra))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
	a := &fcuConfig{
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
	require.NoError(t, err)

	bState, _ := util.DeterministicGenesisState(t, 10)
	genesis.StoreStateDuringTest(t, bState)
	require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
	require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)

@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
		log.WithError(err).Error("could not compute payload attributes")
		log.WithError(err).Error("Could not compute payload attributes")
		return
	}
	if fcuArgs.attributes.IsEmpty() {
		return
	}
	if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
		log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
		log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
	}
}

@@ -99,11 +99,9 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
	}

	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
		log.WithError(err).Error("could not save head")
		log.WithError(err).Error("Could not save head")
	}

	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)

	// Only need to prune attestations from pool if the head has changed.
	s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
	return nil
@@ -114,7 +112,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo

func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
	headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
	if err != nil {
		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
	}
	currentSlot := s.CurrentSlot()
	if proposingSlot == currentSlot {
@@ -132,17 +130,17 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
	if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
		return true
	}
	secs, err := slots.SecondsSinceSlotStart(currentSlot,
		uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
	sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
	if err != nil {
		log.WithError(err).Error("could not compute seconds since slot start")
		log.WithError(err).Error("Could not compute seconds since slot start")
	}
	if secs >= doublylinkedtree.ProcessAttestationsThreshold {
	if sss >= doublylinkedtree.ProcessAttestationsThreshold {
		log.WithFields(logrus.Fields{
			"root": fmt.Sprintf("%#x", newHeadRoot),
			"weight": headWeight,
		}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
			doublylinkedtree.ProcessAttestationsThreshold)
			"root": fmt.Sprintf("%#x", newHeadRoot),
			"weight": headWeight,
			"sinceSlotStart": sss,
			"threshold": doublylinkedtree.ProcessAttestationsThreshold,
		}).Info("Attempted late block reorg aborted due to attestations after threshold")
		lateBlockFailedAttemptFirstThreshold.Inc()
	}
}

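// Editor's sketch (not part of the diff): slots.SinceSlotStart is assumed here to
// return a time.Duration, which is why sss compares directly against
// doublylinkedtree.ProcessAttestationsThreshold. Conceptually:
//
//	sss := now.Sub(slotStart) // where slotStart = genesis + slot*SecondsPerSlot
//	late := sss >= doublylinkedtree.ProcessAttestationsThreshold
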
@@ -160,6 +160,7 @@ func TestShouldOverrideFCU(t *testing.T) {
	ctx, fcs := tr.ctx, tr.fcs

	service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
	fcs.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
	headRoot := [32]byte{'b'}
	parentRoot := [32]byte{'a'}
	ojc := &ethpb.Checkpoint{}
@@ -180,11 +181,12 @@ func TestShouldOverrideFCU(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, headRoot, head)

	fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
	wantLog := "aborted due to attestations after threshold"
	fcs.SetGenesisTime(time.Now().Add(-29 * time.Second))
	require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
	require.LogsDoNotContain(t, hook, "10 seconds")
	fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
	require.LogsDoNotContain(t, hook, wantLog)
	fcs.SetGenesisTime(time.Now().Add(-24 * time.Second))
	service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
	require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
	require.LogsContain(t, hook, "10 seconds")
	require.LogsContain(t, hook, wantLog)
}

@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
	oldHeadRoot := bytesutil.ToBytes32(r)
	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
	if err != nil {
		log.WithError(err).Error("could not check if node is optimistically synced")
		log.WithError(err).Error("Could not check if node is optimistically synced")
	}
	if headBlock.Block().ParentRoot() != oldHeadRoot {
		// A chain re-org occurred, so we fire an event notifying the rest of the services.
@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
		dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
		oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
		}
		newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
		}
		log.WithFields(logrus.Fields{
			"newSlot": fmt.Sprintf("%d", newHeadSlot),

@@ -18,9 +18,6 @@ import (
	"github.com/pkg/errors"
)

// Initialize the state cache for sync committees.
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()

// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return the caller's sync committee indices and public keys with respect to the current head state.
type HeadSyncCommitteeFetcher interface {
@@ -143,7 +140,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
	defer mLock.Unlock()

	// If a head state already exists for the requested slot, we don't need to process slots.
	cachedState, err := syncCommitteeHeadStateCache.Get(slot)
	cachedState, err := s.syncCommitteeHeadState.Get(slot)
	switch {
	case err == nil:
		syncHeadStateHit.Inc()
@@ -166,7 +163,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
		return nil, err
	}
	syncHeadStateMiss.Inc()
	err = syncCommitteeHeadStateCache.Put(slot, headState)
	err = s.syncCommitteeHeadState.Put(slot, headState)
	return headState, err
	default:
		// In the event we encounter another error

@@ -5,7 +5,6 @@ import (

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -15,7 +14,7 @@ import (

func TestService_HeadSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	// Current period
@@ -38,7 +37,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {

func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -52,7 +51,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {

func TestService_headNextSyncCommitteeIndices(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -66,7 +65,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {

func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -81,7 +80,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {

func TestService_HeadSyncCommitteeDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
@@ -95,7 +94,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {

func TestService_HeadSyncContributionProofDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
@@ -109,7 +108,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {

func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
	s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
	c := &Service{}
	c := testServiceWithDB(t)
	c.head = &head{state: s}

	wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
@@ -122,17 +121,14 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
}

func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
	c := syncCommitteeHeadStateCache
	t.Cleanup(func() {
		syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
	})
	s := testServiceNoDB(t)
	beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
	require.NoError(t, beaconState.SetSlot(100))
	cachedState, err := c.Get(101)
	cachedState, err := s.syncCommitteeHeadState.Get(101)
	require.ErrorContains(t, cache.ErrNotFound.Error(), err)
	require.Equal(t, nil, cachedState)
	require.NoError(t, c.Put(101, beaconState))
	cachedState, err = c.Get(101)
	require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
	cachedState, err = s.syncCommitteeHeadState.Get(101)
	require.NoError(t, err)
	require.DeepEqual(t, beaconState, cachedState)
}

@@ -9,7 +9,6 @@ import (

	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -154,14 +153,10 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
	t.Run("genesis_state_root", func(t *testing.T) {
		bState, _ := util.DeterministicGenesisState(t, 10)
		notifier := &mock.MockStateNotifier{RecordEvents: true}
		srv := &Service{
			cfg: &config{
				StateNotifier: notifier,
				ForkChoiceStore: doublylinkedtree.New(),
			},
			originBlockRoot: [32]byte{1},
		}
		srv := testServiceWithDB(t)
		srv.SetGenesisTime(time.Now())
		notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
		srv.originBlockRoot = [32]byte{1}
		st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
		require.NoError(t, err)
		require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
@@ -185,15 +180,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
	})
	t.Run("non_genesis_values", func(t *testing.T) {
		bState, _ := util.DeterministicGenesisState(t, 10)
		notifier := &mock.MockStateNotifier{RecordEvents: true}
		genesisRoot := [32]byte{1}
		srv := &Service{
			cfg: &config{
				StateNotifier: notifier,
				ForkChoiceStore: doublylinkedtree.New(),
			},
			originBlockRoot: genesisRoot,
		}
		srv := testServiceWithDB(t)
		srv.SetGenesisTime(time.Now())
		srv.originBlockRoot = genesisRoot
		notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
		st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
		require.NoError(t, err)
		require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
@@ -407,7 +398,7 @@ func TestSaveOrphanedOps(t *testing.T) {
	ctx := t.Context()
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
	service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
	service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

	// Chain setup
	// 0 -- 1 -- 2 -- 3

@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
const (
	BytesPerCell = ckzg4844.BytesPerCell
	BytesPerProof = ckzg4844.BytesPerProof
)

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte
type Proof [BytesPerProof]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
@@ -102,7 +105,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
	for i := range cells {
		ckzgCells[i] = ckzg4844.Cell(cells[i])
	}

	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

@@ -11,7 +11,7 @@ import (
)

var (
	// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
	// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
	//go:embed trusted_setup_4096.json
	embeddedTrustedSetup []byte // 1.2Mb
	kzgContext *GoKZG.Context

@@ -1,32 +1,14 @@
package kzg

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
	"github.com/pkg/errors"
)

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
	if len(sidecars) == 0 {
		return nil
	}
	if len(sidecars) == 1 {
		return kzgContext.VerifyBlobKZGProof(
			bytesToBlob(sidecars[0].Blob),
			bytesToCommitment(sidecars[0].KzgCommitment),
			bytesToKZGProof(sidecars[0].KzgProof))
	}
	blobs := make([]GoKZG.Blob, len(sidecars))
	cmts := make([]GoKZG.KZGCommitment, len(sidecars))
	proofs := make([]GoKZG.KZGProof, len(sidecars))
	for i, sidecar := range sidecars {
		blobs[i] = *bytesToBlob(sidecar.Blob)
		cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
		proofs[i] = bytesToKZGProof(sidecar.KzgProof)
	}
	return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) *GoKZG.Blob {
	var ret GoKZG.Blob
	copy(ret[:], blob)
@@ -42,3 +24,144 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
	copy(ret[:], proof)
	return
}

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
	if len(blobSidecars) == 0 {
		return nil
	}
	if len(blobSidecars) == 1 {
		return kzgContext.VerifyBlobKZGProof(
			bytesToBlob(blobSidecars[0].Blob),
			bytesToCommitment(blobSidecars[0].KzgCommitment),
			bytesToKZGProof(blobSidecars[0].KzgProof))
	}
	blobs := make([]GoKZG.Blob, len(blobSidecars))
	cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
	proofs := make([]GoKZG.KZGProof, len(blobSidecars))
	for i, sidecar := range blobSidecars {
		blobs[i] = *bytesToBlob(sidecar.Blob)
		cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
		proofs[i] = bytesToKZGProof(sidecar.KzgProof)
	}
	return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
	if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
		return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
	}

	if len(blobs) == 0 {
		return nil
	}

	// Optimize for the single blob case - use single verification to avoid batch overhead.
	if len(blobs) == 1 {
		return kzgContext.VerifyBlobKZGProof(
			bytesToBlob(blobs[0]),
			bytesToCommitment(commitments[0]),
			bytesToKZGProof(proofs[0]))
	}

	// Use batch verification for multiple blobs.
	ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
	ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
	ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))

	for i := range blobs {
		if len(blobs[i]) != len(ckzg4844.Blob{}) {
			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
		}
		if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
		}
		if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
		}
		ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
		ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
		ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
	}

	valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
	if err != nil {
		return errors.Wrap(err, "batch verification")
	}
	if !valid {
		return errors.New("batch KZG proof verification failed")
	}

	return nil
}

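// Editor's sketch (not part of the diff): typical call shape for VerifyBlobKZGProofBatch,
// with raw byte-slice inputs; see TestVerifyBlobKZGProofBatch further below for real usage.
// The blobA/cmtA/proofA names are hypothetical:
//
//	blobs := [][]byte{blobA[:], blobB[:]}
//	commitments := [][]byte{cmtA[:], cmtB[:]}
//	proofs := [][]byte{proofA[:], proofB[:]}
//	if err := VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
//		// at least one (blob, commitment, proof) triple is invalid, or input sizes mismatched
//	}
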
// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
|
||||
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
|
||||
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
|
||||
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
|
||||
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
|
||||
blobCount := uint64(len(blobs))
|
||||
expectedCellProofs := blobCount * numberOfColumns
|
||||
|
||||
if uint64(len(cellProofs)) != expectedCellProofs {
|
||||
return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
|
||||
}
|
||||
|
||||
if len(commitments) != len(blobs) {
|
||||
return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
|
||||
}
|
||||
|
||||
if blobCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle multiple blobs - compute cells for all blobs
|
||||
allCells := make([]Cell, 0, expectedCellProofs)
|
||||
allCommitments := make([]Bytes48, 0, expectedCellProofs)
|
||||
allIndices := make([]uint64, 0, expectedCellProofs)
|
||||
allProofs := make([]Bytes48, 0, expectedCellProofs)
|
||||
|
||||
for blobIndex := range blobs {
|
||||
if len(blobs[blobIndex]) != len(Blob{}) {
|
||||
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
|
||||
}
|
||||
// Convert blob to kzg.Blob type
|
||||
blob := Blob(blobs[blobIndex])
|
||||
|
||||
// Compute cells for this blob
|
||||
cells, err := ComputeCells(&blob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
|
||||
}
|
||||
|
||||
// Add cells and corresponding data for each column
|
||||
for columnIndex := range numberOfColumns {
|
||||
cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
|
||||
if len(commitments[blobIndex]) != len(Bytes48{}) {
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
|
||||
}
|
||||
if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
|
||||
}
|
||||
allCells = append(allCells, cells[columnIndex])
|
||||
allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
|
||||
allIndices = append(allIndices, columnIndex)
|
||||
|
||||
allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
|
||||
}
|
||||
}
|
||||
|
||||
// Batch verify all cells
|
||||
valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cell batch verification")
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("cell KZG proof batch verification failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
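
// A hypothetical wiring sketch for the flattened layout this function expects:
// cell proofs arrive as blobCount*numberOfColumns entries ordered blob-major,
// so the proof for (blob b, column c) sits at index b*numberOfColumns + c.
// The bundle value and its field names are illustrative.
//
//	numberOfColumns := uint64(128) // use the configured column count
//	if err := VerifyCellKZGProofBatchFromBlobData(
//		bundle.Blobs,       // [][]byte, one 131072-byte blob per entry
//		bundle.Commitments, // [][]byte, one 48-byte commitment per blob
//		bundle.CellProofs,  // [][]byte, flattened 48-byte cell proofs
//		numberOfColumns,
//	); err != nil {
//		return errors.Wrap(err, "cell KZG proof batch verification")
//	}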

@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}

func TestVerify(t *testing.T) {
	sidecars := make([]blocks.ROBlob, 0)
	require.NoError(t, Verify(sidecars...))
	blobSidecars := make([]blocks.ROBlob, 0)
	require.NoError(t, Verify(blobSidecars...))
}

func TestBytesToAny(t *testing.T) {
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
}

func TestGenerateCommitmentAndProof(t *testing.T) {
	require.NoError(t, Start())
	blob := random.GetRandBlob(123)
	commitment, proof, err := GenerateCommitmentAndProof(blob)
	require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
	require.Equal(t, expectedCommitment, commitment)
	require.Equal(t, expectedProof, proof)
}

func TestVerifyBlobKZGProofBatch(t *testing.T) {
	// Initialize KZG for testing
	require.NoError(t, Start())

	t.Run("valid single blob batch", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}
		proofs := [][]byte{proof[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.NoError(t, err)
	})

	t.Run("valid multiple blob batch", func(t *testing.T) {
		blobCount := 3
		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		proofs := make([][]byte, blobCount)

		for i := 0; i < blobCount; i++ {
			blob := random.GetRandBlob(int64(i))
			commitment, proof, err := GenerateCommitmentAndProof(blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]
			proofs[i] = proof[:]
		}

		err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.NoError(t, err)
	})

	t.Run("empty inputs should pass", func(t *testing.T) {
		err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
		require.NoError(t, err)
	})

	t.Run("mismatched input lengths", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Test different mismatch scenarios
		err = VerifyBlobKZGProofBatch(
			[][]byte{blob[:]},
			[][]byte{},
			[][]byte{proof[:]},
		)
		require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)

		err = VerifyBlobKZGProofBatch(
			[][]byte{blob[:], blob[:]},
			[][]byte{commitment[:]},
			[][]byte{proof[:], proof[:]},
		)
		require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
	})

	t.Run("invalid commitment should fail", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		_, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Use a different blob's commitment (mismatch)
		differentBlob := random.GetRandBlob(456)
		wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{wrongCommitment[:]}
		proofs := [][]byte{proof[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		// The single-blob optimization uses a different error message
		require.ErrorContains(t, "can't verify opening proof", err)
	})

	t.Run("invalid proof should fail", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, _, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Use wrong proof
		invalidProof := make([]byte, 48) // All zeros

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}
		proofs := [][]byte{invalidProof}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "short buffer", err)
	})

	t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
		// First blob - valid
		blob1 := random.GetRandBlob(123)
		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
		require.NoError(t, err)

		// Second blob - invalid proof
		blob2 := random.GetRandBlob(456)
		commitment2, _, err := GenerateCommitmentAndProof(blob2)
		require.NoError(t, err)
		invalidProof := make([]byte, 48) // All zeros

		blobs := [][]byte{blob1[:], blob2[:]}
		commitments := [][]byte{commitment1[:], commitment2[:]}
		proofs := [][]byte{proof1[:], invalidProof}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "batch verification", err)
	})

	t.Run("batch KZG proof verification failed", func(t *testing.T) {
		// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
		blob1 := random.GetRandBlob(123)
		blob2 := random.GetRandBlob(456)

		// Generate valid proof for blob1
		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
		require.NoError(t, err)

		// Generate valid proof for blob2 but use wrong commitment (from blob1)
		_, proof2, err := GenerateCommitmentAndProof(blob2)
		require.NoError(t, err)

		// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
		blobs := [][]byte{blob1[:], blob2[:]}
		commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
		proofs := [][]byte{proof1[:], proof2[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "batch KZG proof verification failed", err)
	})
}

func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
	// Initialize KZG for testing
	require.NoError(t, Start())

	t.Run("valid single blob cell verification", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Generate blob and commitment
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		// Compute cells and proofs
		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
		require.NoError(t, err)

		// Create flattened cell proofs (like execution client format)
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = cellsAndProofs.Proofs[i][:]
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.NoError(t, err)
	})

	t.Run("valid multiple blob cell verification", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		for i := range blobCount {
			// Generate blob and commitment
			randBlob := random.GetRandBlob(int64(i))
			var blob Blob
			copy(blob[:], randBlob[:])
			commitment, err := BlobToKZGCommitment(&blob)
			require.NoError(t, err)

			// Compute cells and proofs
			cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]

			// Add cell proofs for this blob
			for j := range numberOfColumns {
				allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
			}
		}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.NoError(t, err)
	})

	t.Run("empty inputs should pass", func(t *testing.T) {
		err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
		require.NoError(t, err)
	})

	t.Run("mismatched blob and commitment count", func(t *testing.T) {
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])

		err := VerifyCellKZGProofBatchFromBlobData(
			[][]byte{blob[:]},
			[][]byte{}, // Empty commitments
			[][]byte{},
			128,
		)
		require.ErrorContains(t, "expected 128 cell proofs", err)
	})

	t.Run("wrong cell proof count", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
		wrongCellProofs := make([][]byte, 10)

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
		require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
	})

	t.Run("invalid cell proofs should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		// Create invalid cell proofs (all zeros)
		invalidCellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			invalidCellProofs[i] = make([]byte, 48) // All zeros
		}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
		require.ErrorContains(t, "cell batch verification", err)
	})

	t.Run("mismatched commitment should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Generate blob and correct cell proofs
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
		require.NoError(t, err)

		// Generate wrong commitment from different blob
		randBlob2 := random.GetRandBlob(456)
		var differentBlob Blob
		copy(differentBlob[:], randBlob2[:])
		wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
		require.NoError(t, err)

		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = cellsAndProofs.Proofs[i][:]
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{wrongCommitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.ErrorContains(t, "cell KZG proof batch verification failed", err)
	})

	t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Create invalid blob (not properly formatted)
		invalidBlobData := make([]byte, 10) // Too short
		commitment := make([]byte, 48)      // Dummy commitment
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = make([]byte, 48)
		}

		blobs := [][]byte{invalidBlobData}
		commitments := [][]byte{commitment}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.NotNil(t, err)
		require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
	})

	t.Run("invalid commitment size should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])

		// Create invalid commitment (wrong size)
		invalidCommitment := make([]byte, 32) // Should be 48 bytes
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = make([]byte, 48)
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{invalidCommitment}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
	})

	t.Run("invalid cell proof size should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		// Create invalid cell proofs (wrong size)
		invalidCellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			if i == 0 {
				invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
			} else {
				invalidCellProofs[i] = make([]byte, 48)
			}
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
		require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
	})

	t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		// First blob - valid
		randBlob1 := random.GetRandBlob(123)
		var blob1 Blob
		copy(blob1[:], randBlob1[:])
		commitment1, err := BlobToKZGCommitment(&blob1)
		require.NoError(t, err)
		blobs[0] = blob1[:]
		commitments[0] = commitment1[:]

		// Second blob - use invalid commitment size
		randBlob2 := random.GetRandBlob(456)
		var blob2 Blob
		copy(blob2[:], randBlob2[:])
		blobs[1] = blob2[:]
		commitments[1] = make([]byte, 32) // Wrong size

		// Add cell proofs for both blobs
		for i := 0; i < blobCount; i++ {
			for j := uint64(0); j < numberOfColumns; j++ {
				allCellProofs = append(allCellProofs, make([]byte, 48))
			}
		}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
	})

	t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		for i := 0; i < blobCount; i++ {
			randBlob := random.GetRandBlob(int64(i))
			var blob Blob
			copy(blob[:], randBlob[:])
			commitment, err := BlobToKZGCommitment(&blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]

			// Add cell proofs - make some invalid in the second blob
			for j := uint64(0); j < numberOfColumns; j++ {
				if i == 1 && j == 64 {
					// Invalid proof size in middle of second blob's proofs
					allCellProofs = append(allCellProofs, make([]byte, 20))
				} else {
					allCellProofs = append(allCellProofs, make([]byte, 48))
				}
			}
		}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
	})
}

@@ -86,8 +86,8 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
	return nil
}

func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
	startTime, err := slots.ToTime(genesisTime, block.Slot())
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
	startTime, err := slots.StartTime(genesis, block.Slot())
	if err != nil {
		return err
	}

@@ -8,16 +8,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestReportEpochMetrics_BadHeadState(t *testing.T) {
	s, err := util.NewBeaconState()
	require.NoError(t, err)
	h, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, h.SetValidators(nil))
	err = reportEpochMetrics(t.Context(), s, h)
	require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
}

func TestReportEpochMetrics_BadAttestation(t *testing.T) {
	s, err := util.NewBeaconState()
	require.NoError(t, err)

@@ -8,9 +8,10 @@ import (
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func testServiceOptsWithDB(t *testing.T) []Option {
func testServiceOptsWithDB(t testing.TB) []Option {
	beaconDB := testDB.SetupDB(t)
	fcs := doublylinkedtree.New()
	cs := startup.NewClockSynchronizer()
@@ -31,3 +32,15 @@ func testServiceOptsNoDB() []Option {
	cs := startup.NewClockSynchronizer()
	return []Option{WithClockSynchronizer(cs)}
}

func testServiceNoDB(t testing.TB) *Service {
	s, err := NewService(t.Context(), testServiceOptsNoDB()...)
	require.NoError(t, err)
	return s
}

func testServiceWithDB(t testing.TB) *Service {
	s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
	require.NoError(t, err)
	return s
}

@@ -6,12 +6,11 @@ import (
	"github.com/OffchainLabs/prysm/v6/async/event"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -33,6 +32,14 @@ func WithMaxGoroutines(x int) Option {
	}
}

// WithLCStore for light client store access.
func WithLCStore() Option {
	return func(s *Service) error {
		s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
		return nil
	}
}
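
// A hypothetical wiring example for the new option; ctx, genesis, and the
// companion option are illustrative, not prescribed by this change:
//
//	svc, err := NewService(ctx, WithLCStore(), WithGenesisTime(genesis))
//	if err != nil {
//		return errors.Wrap(err, "new blockchain service")
//	}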

// WithWeakSubjectivityCheckpoint for checkpoint sync.
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
	return func(s *Service) error {
@@ -227,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
	}
}

// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
	return func(s *Service) error {
		s.cfg.CustodyInfo = custodyInfo
		return nil
	}
}

// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
	return func(s *Service) error {
@@ -246,7 +245,7 @@ func WithSlasherEnabled(enabled bool) Option {
// WithGenesisTime sets the genesis time for the blockchain service.
func WithGenesisTime(genesisTime time.Time) Option {
	return func(s *Service) error {
		s.genesisTime = genesisTime
		s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
		return nil
	}
}
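
// A standalone illustration of the truncation semantics (values illustrative):
//
//	g := time.Unix(1606824023, 500_000_000) // genesis with a stray 500ms
//	g.Truncate(time.Second).Equal(time.Unix(1606824023, 0)) // true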

@@ -60,10 +60,8 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
		return err
	}

	genesisTime := uint64(s.genesisTime.Unix())

	// Verify attestation target is from current epoch or previous epoch.
	if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
	if err := verifyAttTargetEpoch(ctx, s.genesisTime, time.Now().Add(disparity), tgt); err != nil {
		return err
	}

@@ -76,7 +74,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
	// validate_aggregate_proof.go and validate_beacon_attestation.go

	// Verify attestations can only affect the fork choice of subsequent slots.
	if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
	if err := slots.VerifyTime(s.genesisTime, a.GetData().Slot+1, disparity); err != nil {
		return err
	}

@@ -4,12 +4,12 @@ import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/OffchainLabs/prysm/v6/async"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -139,8 +139,8 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
}

// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
	currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
func verifyAttTargetEpoch(_ context.Context, genesis, now time.Time, c *ethpb.Checkpoint) error {
	currentSlot := slots.At(genesis, now)
	currentEpoch := slots.ToEpoch(currentSlot)
	var prevEpoch primitives.Epoch
	// Prevents previous epoch underflow
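
// A standalone, runnable sketch of the slot arithmetic this migration moves
// behind the time.Time-based helpers: both the old uint64 form and the new
// form compute floor((now - genesis) / secondsPerSlot). The constant and
// names below are illustrative, not the prysm implementation.
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	const secondsPerSlot = 12 // mainnet value, for illustration
//
//	func slotAt(genesis, now time.Time) uint64 {
//		if now.Before(genesis) {
//			return 0
//		}
//		return uint64(now.Sub(genesis) / (secondsPerSlot * time.Second))
//	}
//
//	func main() {
//		genesis := time.Unix(0, 0)
//		fmt.Println(slotAt(genesis, genesis.Add(25*time.Second))) // prints 2
//	}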

@@ -355,22 +355,22 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
	ctx := t.Context()

	nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
	require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
	nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
	require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
}

func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
	ctx := t.Context()

	nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
	require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
	nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
	require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Epoch: 1}))
}

func TestAttEpoch_NotMatch(t *testing.T) {
	ctx := t.Context()

	nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
	err := verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
	nowTime := time.Unix(2*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
	err := verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
	assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
}

@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	}

	// Fill in missing blocks
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
	}

@@ -240,13 +240,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
		}
	}

	if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
		return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
	if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
		return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
	}

	args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
		JustifiedCheckpoint: jCheckpoints[i],
		FinalizedCheckpoint: fCheckpoints[i]}
	pendingNodes[len(blks)-i-1] = args
	pendingNodes[i] = args
	if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
		tracing.AnnotateError(span, err)
		return err
@@ -283,14 +284,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
		return err
	}
	// Insert all nodes but the last one to forkchoice
	// Insert all nodes to forkchoice
	if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
		return errors.Wrap(err, "could not insert batch to forkchoice")
	}
	// Insert the last block to forkchoice
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
	}
	// Set their optimistic status
	if isValidPayload {
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -308,6 +305,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
	blockVersion := roBlock.Version()
	block := roBlock.Block()
	slot := block.Slot()

	if blockVersion >= version.Fulu {
		if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
			return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
		}

		return nil
	}

	if blockVersion >= version.Deneb {
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
			return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
		}

		return nil
	}

	return nil
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
	e := coreTime.CurrentEpoch(st)
	if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -329,7 +350,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
	// The latest block header is from the previous epoch
	r, err := st.LatestBlockHeader().HashTreeRoot()
	if err != nil {
		log.WithError(err).Error("could not update proposer index state-root map")
		log.WithError(err).Error("Could not update proposer index state-root map")
		return nil
	}
	// The proposer indices cache takes the target root for the previous
@@ -339,12 +360,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
	}
	target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
	if err != nil {
		log.WithError(err).Error("could not update proposer index state-root map")
		log.WithError(err).Error("Could not update proposer index state-root map")
		return nil
	}
	err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
	if err != nil {
		log.WithError(err).Error("could not update proposer index state-root map")
		log.WithError(err).Error("Could not update proposer index state-root map")
	}
	return nil
}
@@ -562,7 +583,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
	if err := s.waitForSync(); err != nil {
		log.WithError(err).Error("failed to wait for initial sync")
		log.WithError(err).Error("Failed to wait for initial sync")
		return
	}

@@ -584,7 +605,7 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if len(expected) == 0 {
		return nil, nil
@@ -592,7 +613,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
	if len(expected) > maxBlobsPerBlock {
		return nil, errMaxBlobsExceeded
	}
	indices := bs.Summary(root)
	indices := store.Summary(root)
	missing := make(map[uint64]bool, len(expected))
	for i := range expected {
		if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -607,7 +628,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
	if len(expected) == 0 {
		return nil, nil
	}
@@ -619,7 +640,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
	}

	// Get a summary of the data columns stored in the database.
	summary := bs.Summary(root)
	summary := store.Summary(root)

	// Check all expected data columns against the summary.
	missing := make(map[uint64]bool)
@@ -639,14 +660,14 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(
	ctx context.Context,
	root [fieldparams.RootLength]byte,
	signedBlock interfaces.ReadOnlySignedBeaconBlock,
	roBlock consensusblocks.ROBlock,
) error {
	block := signedBlock.Block()
	block := roBlock.Block()
	if block == nil {
		return errors.New("invalid nil beacon block")
	}

	root := roBlock.Root()
	blockVersion := block.Version()
	if blockVersion >= version.Fulu {
		return s.areDataColumnsAvailable(ctx, root, block)
@@ -666,10 +687,9 @@ func (s *Service) areDataColumnsAvailable(
	root [fieldparams.RootLength]byte,
	block interfaces.ReadOnlyBeaconBlock,
) error {
	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return nil
	}
@@ -690,16 +710,21 @@ func (s *Service) areDataColumnsAvailable(
	}

	// All columns to sample need to be available for the block to be considered available.
	// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
	nodeID := s.cfg.P2P.NodeID()

	// Prevent the custody group count from changing during the rest of the function.
	s.cfg.CustodyInfo.Mut.RLock()
	defer s.cfg.CustodyInfo.Mut.RUnlock()

	// Get the custody group sampling size for the node.
	custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
	custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
	if err != nil {
		return errors.Wrap(err, "custody group count")
	}

	// Compute the sampling size.
	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
	samplingSize := max(samplesPerSlot, custodyGroupCount)

	// Get the peer info for the node.
	peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
	if err != nil {
		return errors.Wrap(err, "peer info")
	}
@@ -712,7 +737,7 @@ func (s *Service) areDataColumnsAvailable(
	summary := s.dataColumnStorage.Summary(root)
	storedDataColumnsCount := summary.Count()

	minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
	minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()

	// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
	// We don't need to wait for the rest of the data columns to declare the block as available.
@@ -737,7 +762,10 @@ func (s *Service) areDataColumnsAvailable(
	}

	// Log for DA checks that cross over into the next slot; helpful for debugging.
	nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
	nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
	if err != nil {
		return fmt.Errorf("unable to determine slot start time: %w", err)
	}

	// Avoid logging if DA check is called after next slot start.
	if nextSlot.After(time.Now()) {
@@ -812,7 +840,7 @@ func (s *Service) areDataColumnsAvailable(
				missingIndices = uint64MapToSortedSlice(missingMap)
			}

			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
		}
	}
}

@@ -855,7 +883,10 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
	nc := s.blobNotifiers.forRoot(root, block.Slot())

	// Log for DA checks that cross over into the next slot; helpful for debugging.
	nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
	nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
	if err != nil {
		return fmt.Errorf("unable to determine slot start time: %w", err)
	}
	// Avoid logging if DA check is called after next slot start.
	if nextSlot.After(time.Now()) {
		nst := time.AfterFunc(time.Until(nextSlot), func() {
@@ -906,7 +937,7 @@ func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
	currentSlot := s.CurrentSlot()
	if s.CurrentSlot() == s.HeadSlot() {
	if currentSlot == s.HeadSlot() {
		return
	}
	s.cfg.ForkChoiceStore.RLock()
@@ -927,10 +958,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
	// blocks.
	lastState.CopyAllTries()
	if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
		log.WithError(err).Debug("could not update next slot state cache")
		log.WithError(err).Debug("Could not update next slot state cache")
	}
	if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
		log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
		log.WithError(err).Error("Could not update epoch boundary caches")
	}
	// return early if we already started building a block for the current
	// head root
@@ -944,7 +975,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
	if attribute.IsEmpty() {
		headBlock, err := s.headBlock()
		if err != nil {
			log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
			log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
		}
		// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
		// call notifyForkchoiceUpdate, so the event is fired here.

@@ -1,17 +1,14 @@
package blockchain

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -31,9 +28,13 @@ import (
	"github.com/sirupsen/logrus"
)

// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
	return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
	return slots.CurrentSlot(s.genesisTime)
}

// getFCUArgs returns the arguments to call forkchoice update
@@ -45,7 +46,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
		return nil
	}
	slot := cfg.roblock.Block().Slot()
	if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
	if slots.WithinVotingWindow(s.genesisTime, slot) {
		return nil
	}
	return s.computePayloadAttributes(cfg, fcuArgs)
@@ -68,11 +69,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
	receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
	if err != nil {
		log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
		log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
	}
	headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
	if err != nil {
		log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
		log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
	}
	log.WithFields(logrus.Fields{
		"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -130,38 +131,26 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
	})
}

// processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
	if err := s.processLightClientUpdate(cfg); err != nil {
		log.WithError(err).Error("Failed to process light client update")
	}
	if err := s.processLightClientBootstrap(cfg); err != nil {
		log.WithError(err).Error("Failed to process light client bootstrap")
	}
	if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
		log.WithError(err).Error("Failed to process light client optimistic update")
	}
	if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
		log.WithError(err).Error("Failed to process light client finality update")
	}
}

// processLightClientUpdate saves the light client update for this block
// if it's better than the already saved one, when feature flag is enabled.
func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
	attestedRoot := cfg.roblock.Block().ParentRoot()
	attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
		log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
		return
	}
	if attestedBlock == nil || attestedBlock.IsNil() {
		return errors.New("attested block is nil")
		log.Error("processLightClientUpdates: Could not get attested block")
		return
	}
	attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
		log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
		return
	}
	if attestedState == nil || attestedState.IsNil() {
		return errors.New("attested state is nil")
		log.Error("processLightClientUpdates: Could not get attested state")
		return
	}

	finalizedRoot := attestedState.FinalizedCheckpoint().Root
@@ -169,201 +158,17 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
	if err != nil {
		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
			log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
			return nil
			return
		}
		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
		log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
		return
	}

	update, err := lightclient.NewLightClientUpdateFromBeaconState(
		cfg.ctx,
		s.CurrentSlot(),
		cfg.postState,
		cfg.roblock,
		attestedState,
		attestedBlock,
		finalizedBlock,
	)
	err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
	if err != nil {
		return errors.Wrapf(err, "could not create light client update")
		log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
	}

	period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))

	oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
	if err != nil {
		return errors.Wrapf(err, "could not get current light client update")
	}

	if oldUpdate == nil {
		if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
			return errors.Wrapf(err, "could not save light client update")
		}
		log.WithField("period", period).Debug("Saved new light client update")
		return nil
	}

	isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
	if err != nil {
		return errors.Wrapf(err, "could not compare light client updates")
	}

	if isNewUpdateBetter {
		if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
			return errors.Wrapf(err, "could not save light client update")
		}
		log.WithField("period", period).Debug("Saved new light client update")
		return nil
	}
	log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
	return nil
}

// processLightClientBootstrap saves a light client bootstrap for this block
// when feature flag is enabled.
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
	blockRoot := cfg.roblock.Root()
	bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
	if err != nil {
		return errors.Wrapf(err, "could not create light client bootstrap")
	}
	if err := s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap); err != nil {
		return errors.Wrapf(err, "could not save light client bootstrap")
	}
	return nil
}

func (s *Service) processLightClientFinalityUpdate(
	ctx context.Context,
	signed interfaces.ReadOnlySignedBeaconBlock,
	postState state.BeaconState,
) error {
	attestedRoot := signed.Block().ParentRoot()
	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
	}
	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
	}

	finalizedCheckpoint := attestedState.FinalizedCheckpoint()

	// Check if the finalized checkpoint has changed
	if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
		return nil
	}

	finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
	if err != nil {
		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
			log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
			return nil
		}
		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
	}

	newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
		ctx,
		postState.Slot(),
		postState,
		signed,
		attestedState,
		attestedBlock,
		finalizedBlock,
	)

	if err != nil {
		return errors.Wrap(err, "could not create light client finality update")
	}

	lastUpdate := s.lcStore.LastFinalityUpdate()
	if lastUpdate != nil {
		// The finalized_header.beacon.slot is greater than that of all previously forwarded finality updates,
		// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
		// sync committee participation while the previously forwarded finality update for that slot did not indicate supermajority.
		newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
		newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())

		lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
		lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())

		if newUpdateSlot < lastUpdateSlot {
			log.Debug("Skip saving light client finality update: Older than local update")
			return nil
		}
		if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
			log.Debug("Skip saving light client finality update: No supermajority advantage")
			return nil
		}
	}
	log.Debug("Saving new light client finality update")
	s.lcStore.SetLastFinalityUpdate(newUpdate)

	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.LightClientFinalityUpdate,
		Data: newUpdate,
	})

	if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
		return errors.Wrap(err, "could not broadcast light client finality update")
	}

	return nil
}
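
// The gating above condenses to a single predicate (a restatement reusing the
// local variable names, not code from the change itself): forward a finality
// update iff it advances the finalized slot, or ties it while newly reaching
// supermajority sync-committee participation.
//
//	shouldSave := newUpdateSlot > lastUpdateSlot ||
//		(newUpdateSlot == lastUpdateSlot && newHasSupermajority && !lastHasSupermajority)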

func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
	postState state.BeaconState) error {
	attestedRoot := signed.Block().ParentRoot()
	attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
	}
	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
	if err != nil {
		return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
	}

	newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
		ctx,
		postState.Slot(),
		postState,
		signed,
		attestedState,
		attestedBlock,
	)

	if err != nil {
		if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
			log.WithError(err).Debug("Skipping processing light client optimistic update")
			return nil
		}
		return errors.Wrap(err, "could not create light client optimistic update")
	}

	lastUpdate := s.lcStore.LastOptimisticUpdate()
	if lastUpdate != nil {
		// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
		if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
			log.Debug("Skip saving light client optimistic update: Older than local update")
			return nil
		}
	}

	log.Debug("Saving new light client optimistic update")
	s.lcStore.SetLastOptimisticUpdate(newUpdate)

	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.LightClientOptimisticUpdate,
		Data: newUpdate,
	})

	if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
		return errors.Wrap(err, "could not broadcast light client optimistic update")
	}

	return nil
	log.Debug("Processed light client updates")
}
|
||||
|
||||
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
|
||||
@@ -432,7 +237,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the future.
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -527,7 +332,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
// is meant to be asynchronous and run in the background rather than being
|
||||
// tied to the execution of a block.
|
||||
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
|
||||
log.WithError(err).Error("could not migrate to cold")
|
||||
log.WithError(err).Error("Could not migrate to cold")
|
||||
}
|
||||
}()
|
||||
return nil
@@ -561,6 +366,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
+	if fCheckpoint.Epoch > jCheckpoint.Epoch {
+		return ErrInvalidCheckpointArgs
+	}
	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

	// Fork choice only matters from last finalized slot.
@@ -569,15 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
	if err != nil {
		return err
	}
-	// The first block can have a bogus root since the block is not inserted in forkchoice
-	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
-	if err != nil {
-		return err
-	}
-	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
-		JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
+	root := signed.Block().ParentRoot()
	// As long as parent node is not in fork choice store, and parent node is in DB.
-	root := roblock.Block().ParentRoot()
	for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
		b, err := s.getBlock(ctx, root)
		if err != nil {
@@ -596,12 +397,13 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
			FinalizedCheckpoint: fCheckpoint}
		pendingNodes = append(pendingNodes, args)
	}
-	if len(pendingNodes) == 1 {
+	if len(pendingNodes) == 0 {
		return nil
	}
	if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
		return ErrNotDescendantOfFinalized
	}
	slices.Reverse(pendingNodes)
	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}
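The loop above collects missing ancestors one parent at a time and then reverses them so insertion runs oldest-first. A toy sketch of that walk-and-reverse pattern, with hypothetical map-backed stores in place of the fork choice store and the beacon DB:

package main

import (
	"fmt"
	"slices"
)

// block is a hypothetical stand-in carrying only root and parent links.
type block struct {
	root   string
	parent string
}

func main() {
	db := map[string]block{
		"c": {root: "c", parent: "b"},
		"b": {root: "b", parent: "a"},
		"a": {root: "a", parent: "genesis"},
	}
	inForkchoice := map[string]bool{"genesis": true}

	// Walk parent links until fork choice already knows the node, or the DB runs out.
	var pending []block
	root := "c"
	for !inForkchoice[root] {
		b, ok := db[root]
		if !ok {
			break
		}
		pending = append(pending, b)
		root = b.parent
	}

	// Reverse so ancestors are inserted before descendants, matching InsertChain's expectation.
	slices.Reverse(pending)
	fmt.Println(pending) // [{a genesis} {b a} {c b}]
}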
@@ -616,7 +418,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	// Update deposit cache.
	finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
	if err != nil {
-		log.WithError(err).Error("could not fetch finalized state")
+		log.WithError(err).Error("Could not fetch finalized state")
		return
	}

@@ -634,7 +436,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
	eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
	if err != nil {
-		log.WithError(err).Error("could not cast eth1 deposit index")
+		log.WithError(err).Error("Could not cast eth1 deposit index")
		return
	}
	// The deposit index in the state is always the index of the next deposit
@@ -643,12 +445,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
	finalizedEth1DepIdx := eth1DepositIndex - 1
	if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
		0 /* Setting a zero value as we have no access to block height */); err != nil {
-		log.WithError(err).Error("could not insert finalized deposits")
+		log.WithError(err).Error("Could not insert finalized deposits")
		return
	}
	// Deposit proofs are only used during state transition and can be safely removed to save space.
	if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
-		log.WithError(err).Error("could not prune deposit proofs")
+		log.WithError(err).Error("Could not prune deposit proofs")
	}
	// Prune deposits which have already been finalized; the method below prunes all pending deposits (non-inclusive) up
	// to the provided eth1 deposit index.
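The deposit index arithmetic above hides a small off-by-one: the state's Eth1DepositIndex names the *next* deposit to process, so the last finalized deposit sits one behind it. A hypothetical helper illustrating the relationship (not Prysm's code; it adds a zero guard that the snippet above does not need because the index has already been validated):

package main

import "fmt"

// lastFinalizedDepositIndex returns the index of the most recently processed
// deposit given the state's next-deposit index, and false when none exists.
func lastFinalizedDepositIndex(eth1DepositIndex uint64) (int64, bool) {
	if eth1DepositIndex == 0 {
		return 0, false // no deposits processed yet
	}
	return int64(eth1DepositIndex) - 1, true
}

func main() {
	idx, ok := lastFinalizedDepositIndex(17)
	fmt.Println(idx, ok) // 16 true
	_, ok = lastFinalizedDepositIndex(0)
	fmt.Println(ok) // false
}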
File diff suppressed because it is too large
@@ -43,7 +43,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
	if err != nil {
		return nil, err
	}
-	if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
+	if err := slots.ValidateClock(ss, s.genesisTime); err != nil {
		return nil, err
	}
	// We acquire the lock here instead of in getAttPreState because that function gets called from UpdateHead, which holds a write lock.
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
	go func() {
		_, err := s.clockWaiter.WaitForClock(s.ctx)
		if err != nil {
-			log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
+			log.WithError(err).Error("Failed to receive genesis data")
			return
		}
		if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
		} else {
			s.cfg.ForkChoiceStore.Lock()
			if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
-				log.WithError(err).Error("could not process new slot")
+				log.WithError(err).Error("Could not process new slot")
			}
			s.cfg.ForkChoiceStore.Unlock()
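For readers unfamiliar with the routine above: it waits for the genesis clock and then performs per-slot fork choice bookkeeping under a lock. A compressed, hypothetical sketch of such a slot loop — the forkChoice type, ticker, and shortened slot duration are assumptions for brevity, not Prysm's implementation:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// forkChoice is a stand-in for the fork choice store's per-slot bookkeeping.
type forkChoice struct {
	mu   sync.Mutex
	slot uint64
}

func (f *forkChoice) NewSlot(_ context.Context, slot uint64) error {
	f.slot = slot
	return nil
}

func main() {
	const slotDuration = 100 * time.Millisecond // shortened; mainnet uses 12s
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	fc := &forkChoice{}
	genesis := time.Now()
	ticker := time.NewTicker(slotDuration)
	defer ticker.Stop()

	for {
		select {
		case tick := <-ticker.C:
			slot := uint64(tick.Sub(genesis) / slotDuration)
			fc.mu.Lock() // mirror the Lock / NewSlot / Unlock sequence above
			if err := fc.NewSlot(ctx, slot); err != nil {
				fmt.Println("Could not process new slot:", err)
			}
			fc.mu.Unlock()
			fmt.Println("processed slot", slot)
		case <-ctx.Done():
			return
		}
	}
}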
@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
	log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
	if err != nil {
-		log.WithError(err).Error("could not get head block")
+		log.WithError(err).Error("Could not get head block")
		return
	}
	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
		return
	}
	if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
-		log.WithError(err).Error("could not update forkchoice")
+		log.WithError(err).Error("Could not update forkchoice")
	}
}

@@ -177,9 +177,9 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
	for _, a := range atts {
		// Based on the spec, don't process the attestation until the subsequent slot.
		// This delays consideration in the fork choice until their slot is in the past.
-		// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
+		// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
		nextSlot := a.GetData().Slot + 1
-		if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
+		if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
			continue
		}
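The gate above defers an attestation from slot N until slot N+1 has started, per the spec's validate_on_attestation rule. Reduced to its arithmetic (illustrative helper, not the slots package):

package main

import "fmt"

// readyForForkChoice reports whether an attestation from attSlot may be
// considered by fork choice, given the node's current slot: its slot must
// already be in the past, i.e. slot N+1 must have begun.
func readyForForkChoice(attSlot, currentSlot uint64) bool {
	return attSlot+1 <= currentSlot
}

func main() {
	fmt.Println(readyForForkChoice(10, 10)) // false: wait for the next slot
	fmt.Println(readyForForkChoice(10, 11)) // true
}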
@@ -70,7 +70,7 @@ func TestProcessAttestations_Ok(t *testing.T) {

	service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
	genesisState, pks := util.DeterministicGenesisState(t, 64)
-	require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
+	require.NoError(t, genesisState.SetGenesisTime(time.Now().Add(-1*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)))
	require.NoError(t, service.saveGenesisData(ctx, genesisState))
	atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
	require.NoError(t, err)
@@ -16,7 +16,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
-	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -84,12 +83,16 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	}

	receivedTime := time.Now()
-	s.blockBeingSynced.set(blockRoot)
+	err := s.blockBeingSynced.set(blockRoot)
+	if errors.Is(err, errBlockBeingSynced) {
+		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
+		return nil
+	}
	defer s.blockBeingSynced.unset(blockRoot)

	blockCopy, err := block.Copy()
	if err != nil {
-		return err
+		return errors.Wrap(err, "block copy")
	}

	preState, err := s.getBlockPreState(ctx, blockCopy.Block())
@@ -100,17 +103,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	currentCheckpoints := s.saveCurrentCheckpoints(preState)
	roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
	if err != nil {
-		return err
+		return errors.Wrap(err, "new ro block with root")
	}

	postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
	if err != nil {
-		return err
+		return errors.Wrap(err, "validate execution and consensus")
	}

-	daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
+	daWaitedTime, err := s.handleDA(ctx, avs, roblock)
	if err != nil {
-		return err
+		return errors.Wrap(err, "handle da")
	}

	// Defragment the state before continuing block processing.
@@ -131,10 +134,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err := s.postBlockProcess(args); err != nil {
		err := errors.Wrap(err, "could not process block")
		tracing.AnnotateError(span, err)
-		return err
+		return errors.Wrap(err, "post block process")
	}
	if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
-		return err
+		return errors.Wrap(err, "update checkpoints")
	}
	// If slasher is configured, forward the attestations in the block via an event feed for processing.
	if s.slasherEnabled {
@@ -148,12 +151,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

	// Have we been finalizing? Should we start saving hot states to db?
	if err := s.checkSaveHotStateDB(ctx); err != nil {
-		return err
+		return errors.Wrap(err, "check save hot state db")
	}

	// We apply the same heuristic to some of our more important caches.
	if err := s.handleCaches(); err != nil {
-		return err
+		return errors.Wrap(err, "handle caches")
	}
	s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
	return nil
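The errBlockBeingSynced path above implies a set that fails on duplicate roots, so concurrent ReceiveBlock calls for the same block bail out early. A plausible, self-contained shape for that guard (an assumed implementation; the real currentlySyncingBlock type lives elsewhere in this package):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errBlockBeingSynced = errors.New("block is being synced")

// currentlySyncing sketches the dedup guard: set fails when the same root is
// already in flight, and unset releases it once processing finishes.
type currentlySyncing struct {
	sync.Mutex
	roots map[[32]byte]struct{}
}

func (c *currentlySyncing) set(root [32]byte) error {
	c.Lock()
	defer c.Unlock()
	if _, ok := c.roots[root]; ok {
		return errBlockBeingSynced
	}
	c.roots[root] = struct{}{}
	return nil
}

func (c *currentlySyncing) unset(root [32]byte) {
	c.Lock()
	defer c.Unlock()
	delete(c.roots, root)
}

func main() {
	guard := &currentlySyncing{roots: make(map[[32]byte]struct{})}
	root := [32]byte{1}
	fmt.Println(guard.set(root))                                  // <nil>
	fmt.Println(errors.Is(guard.set(root), errBlockBeingSynced)) // true
	guard.unset(root)
	fmt.Println(guard.set(root)) // <nil> again after unset
}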
@@ -183,7 +186,7 @@ func (s *Service) updateCheckpoints(
		return errors.Wrap(err, "could not get head state")
	}
	if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
-		log.WithError(err).Error("could not report epoch metrics")
+		log.WithError(err).Error("Could not report epoch metrics")
	}
	}
	if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
@@ -236,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
	return postState, isValidPayload, nil
}

-func (s *Service) handleDA(
-	ctx context.Context,
-	block interfaces.SignedBeaconBlock,
-	blockRoot [fieldparams.RootLength]byte,
-	avs das.AvailabilityStore,
-) (elapsed time.Duration, err error) {
-	defer func(start time.Time) {
-		elapsed = time.Since(start)
-
-		if err == nil {
-			dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
-		}
-	}(time.Now())
-
-	if avs == nil {
-		if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
-			return
-		}
-
-		return
-	}
-
-	var rob blocks.ROBlock
-	rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
-	if err != nil {
-		return
-	}
-
-	err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
-
-	return
+func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
+	var err error
+	start := time.Now()
+	if avs != nil {
+		err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
+	} else {
+		err = s.isDataAvailable(ctx, block)
+	}
+	elapsed := time.Since(start)
+	if err != nil {
+		return elapsed, err
+	}
+	dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
+	return elapsed, err
}
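The rewrite above trades a named-return/defer construction for straight-line timing: measure the operation, return the elapsed time either way, and record the histogram only on success. That avoids the subtle coupling between named results and deferred closures. The same pattern in isolation — observe and timeOp here are illustrative stand-ins, not Prysm's metrics code:

package main

import (
	"errors"
	"fmt"
	"time"
)

// observe stands in for a Prometheus histogram observation.
func observe(ms float64) { fmt.Printf("waited %.0fms\n", ms) }

// timeOp measures the wall time of a fallible operation, returns the elapsed
// time either way, but records the metric only on success.
func timeOp(op func() error) (time.Duration, error) {
	start := time.Now()
	err := op()
	elapsed := time.Since(start)
	if err != nil {
		return elapsed, err
	}
	observe(float64(elapsed.Milliseconds()))
	return elapsed, nil
}

func main() {
	if _, err := timeOp(func() error { time.Sleep(10 * time.Millisecond); return nil }); err != nil {
		fmt.Println(err)
	}
	if _, err := timeOp(func() error { return errors.New("data unavailable") }); err != nil {
		fmt.Println(err) // elapsed is still returned, but nothing is observed
	}
}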
func (s *Service) reportPostBlockProcessing(
@@ -283,7 +268,7 @@ func (s *Service) reportPostBlockProcessing(
	// Log block sync status.
	cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
-	if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
+	if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
		log.WithError(err).Error("Unable to log block sync status")
	}
	// Log payload data
@@ -300,15 +285,45 @@ func (s *Service) reportPostBlockProcessing(

func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()

	// Send finalization event
	go func() {
		s.sendNewFinalizedEvent(ctx, finalizedState)
	}()

	// Insert finalized deposits into finalized deposit trie
	depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
	go func() {
		s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
		cancel()
	}()

+	if features.Get().EnableLightClient {
+		// Save a light client bootstrap for the finalized checkpoint
+		go func() {
+			st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
+			if err != nil {
+				log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
+				return
+			}
+			err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
+			if err != nil {
+				log.WithError(err).Error("Could not save light client bootstrap by block root")
+			} else {
+				log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
+			}
+		}()
+
+		// Clean up the light client store caches
+		go func() {
+			err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
+			if err != nil {
+				log.WithError(err).Error("Could not migrate light client store to cold storage")
+			} else {
+				log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
+			}
+		}()
+	}
}
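The deposit task above runs in a fire-and-forget goroutine with its own deadline: the context is detached from the caller's, bounded by depositDeadline, and canceled when the work returns. A minimal sketch of that pattern — the constant's value and the simulated work are made up:

package main

import (
	"context"
	"fmt"
	"time"
)

const depositDeadline = 100 * time.Millisecond // stand-in value for the sketch

func main() {
	// Detached, deadline-bound context so the background task neither inherits
	// the caller's cancellation nor runs unbounded.
	depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer cancel() // release the context once the task returns
		select {
		case <-time.After(10 * time.Millisecond): // the actual work
			fmt.Println("deposits inserted and pruned")
		case <-depCtx.Done():
			fmt.Println("gave up:", depCtx.Err())
		}
	}()
	<-done
}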

// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
@@ -8,7 +8,10 @@ import (
	blockchainTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
	"github.com/OffchainLabs/prysm/v6/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -18,6 +21,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -188,7 +192,9 @@ func TestHandleDA(t *testing.T) {
	require.NoError(t, err)

	s, _ := minimalTestService(t)
-	elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
+	block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
+	require.NoError(t, err)
+	elapsed, err := s.handleDA(t.Context(), nil, block)
	require.NoError(t, err)
	require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}
@@ -307,7 +313,10 @@ func TestService_HasBlock(t *testing.T) {
	r, err = b.Block.HashTreeRoot()
	require.NoError(t, err)
	require.Equal(t, true, s.HasBlock(t.Context(), r))
-	s.blockBeingSynced.set(r)
+	err = s.blockBeingSynced.set(r)
+	require.NoError(t, err)
+	err = s.blockBeingSynced.set(r)
+	require.ErrorIs(t, err, errBlockBeingSynced)
	require.Equal(t, false, s.HasBlock(t.Context(), r))
}
@@ -338,7 +347,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
	hook := logTest.NewGlobal()
	s, _ := minimalTestService(t)
-	s.genesisTime = time.Now()
+	s.SetGenesisTime(time.Now())

	require.NoError(t, s.checkSaveHotStateDB(t.Context()))
	assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
@@ -348,8 +357,9 @@ func TestHandleCaches_EnablingLargeSize(t *testing.T) {
	hook := logTest.NewGlobal()
	s, _ := minimalTestService(t)
	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
-	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+	s.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

	helpers.ClearCache()
	require.NoError(t, s.handleCaches())
	assert.LogsContain(t, hook, "Expanding committee cache size")
}
@@ -562,3 +572,44 @@ func Test_executePostFinalizationTasks(t *testing.T) {
	})

}

+func TestProcessLightClientBootstrap(t *testing.T) {
+	featCfg := &features.Flags{}
+	featCfg.EnableLightClient = true
+	reset := features.InitWithReset(featCfg)
+	defer reset()
+
+	s, tr := minimalTestService(t, WithLCStore())
+	ctx := tr.ctx
+
+	for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
+		t.Run(version.String(testVersion), func(t *testing.T) {
+			l := util.NewTestLightClient(t, testVersion)
+
+			require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
+			finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
+			require.NoError(t, err)
+			require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))
+
+			cp := l.AttestedState.FinalizedCheckpoint()
+			require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))
+
+			require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
+
+			s.executePostFinalizationTasks(s.ctx, l.AttestedState)
+
+			// Wait for the goroutine to finish processing.
+			time.Sleep(1 * time.Second)
+
+			// Check that the light client bootstrap is saved.
+			b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
+			require.NoError(t, err)
+			require.NotNil(t, b)
+
+			btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
+			require.NoError(t, err)
+			require.DeepEqual(t, btst, b)
+			require.Equal(t, b.Version(), testVersion)
+		})
+	}
+}
@@ -15,10 +15,9 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
}

// ReceiveDataColumn receives a single data column.
-// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
	if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
-		return errors.Wrap(err, "save data column sidecars")
+		return errors.Wrap(err, "save data column sidecar")
	}

	return nil
@@ -12,17 +12,15 @@ import (
	"github.com/OffchainLabs/prysm/v6/async/event"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
-	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
	f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -31,6 +29,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
+	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -69,6 +68,7 @@ type Service struct {
	slasherEnabled                 bool
	lcStore                        *lightClient.Store
	startWaitingDataColumnSidecars chan bool // for testing purposes only
+	syncCommitteeHeadState         *cache.SyncCommitteeHeadStateCache
}

// config options for the service.
@@ -96,7 +96,6 @@ type config struct {
	FinalizedStateAtStartUp state.BeaconState
	ExecutionEngineCaller   execution.EngineCaller
	SyncChecker             Checker
-	CustodyInfo             *peerdas.CustodyInfo
}

// Checker is an interface used to determine if a node is in initial sync
@@ -180,14 +179,15 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
		seenIndex: make(map[[32]byte][]bool),
	}
	srv := &Service{
-		ctx:                  ctx,
-		cancel:               cancel,
-		boundaryRoots:        [][32]byte{},
-		checkpointStateCache: cache.NewCheckpointStateCache(),
-		initSyncBlocks:       make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
-		blobNotifiers:        bn,
-		cfg:                  &config{},
-		blockBeingSynced:     &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
+		ctx:                    ctx,
+		cancel:                 cancel,
+		boundaryRoots:          [][32]byte{},
+		checkpointStateCache:   cache.NewCheckpointStateCache(),
+		initSyncBlocks:         make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
+		blobNotifiers:          bn,
+		cfg:                    &config{},
+		blockBeingSynced:       &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
+		syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
	}
	for _, opt := range opts {
		if err := opt(srv); err != nil {
@@ -206,17 +206,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a blockchain service's main event loop.
func (s *Service) Start() {
-	saved := s.cfg.FinalizedStateAtStartUp
	defer s.removeStartupState()

-	if saved != nil && !saved.IsNil() {
-		if err := s.StartFromSavedState(saved); err != nil {
-			log.Fatal(err)
-		}
-	} else {
-		if err := s.startFromExecutionChain(); err != nil {
-			log.Fatal(err)
-		}
-	}
+	if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
+		log.Fatal(err)
+	}
	s.spawnProcessAttestationsRoutine()
	go s.runLateBlockTasks()
@@ -265,8 +257,11 @@ func (s *Service) Status() error {

// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
+	if state.IsNil(saved) {
+		return errors.New("Last finalized state at startup is nil")
+	}
	log.Info("Blockchain data already exists in DB, initializing...")
-	s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
+	s.genesisTime = saved.GenesisTime()
	s.cfg.AttService.SetGenesisTime(saved.GenesisTime())

	originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -294,6 +289,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
	if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
		return errors.Wrap(err, "failed to initialize blockchain service")
	}

+	if !params.FuluEnabled() {
+		return nil
+	}
+
+	earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
+	if err != nil {
+		return errors.Wrap(err, "could not get and save custody group count")
+	}
+
+	if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
+		return errors.Wrap(err, "update custody info")
+	}
+
	return nil
}
@@ -356,62 +365,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
	return nil
}

-func (s *Service) startFromExecutionChain() error {
-	log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
-	if s.cfg.ChainStartFetcher == nil {
-		return errors.New("not configured execution chain")
-	}
-	go func() {
-		stateChannel := make(chan *feed.Event, 1)
-		stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
-		defer stateSub.Unsubscribe()
-		for {
-			select {
-			case e := <-stateChannel:
-				if e.Type == statefeed.ChainStarted {
-					data, ok := e.Data.(*statefeed.ChainStartedData)
-					if !ok {
-						log.Error("event data is not type *statefeed.ChainStartedData")
-						return
-					}
-					log.WithField("startTime", data.StartTime).Debug("Received chain start event")
-					s.onExecutionChainStart(s.ctx, data.StartTime)
-					return
-				}
-			case <-s.ctx.Done():
-				log.Debug("Context closed, exiting goroutine")
-				return
-			case err := <-stateSub.Err():
-				log.WithError(err).Error("Subscription to state forRoot failed")
-				return
-			}
-		}
-	}()
-
-	return nil
-}

// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
	preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
	initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
	if err != nil {
		log.WithError(err).Fatal("Could not initialize beacon chain")
	}
	// We start a counter to genesis, if needed.
	gRoot, err := initializedState.HashTreeRoot(s.ctx)
	if err != nil {
		log.WithError(err).Fatal("Could not hash tree root genesis state")
	}
	go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

	vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
	if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
		log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
	}
}
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -422,7 +375,7 @@ func (s *Service) initializeBeaconChain(
	eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
	defer span.End()
-	s.genesisTime = genesisTime
+	s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
	unixTime := uint64(genesisTime.Unix())

	genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
@@ -483,7 +436,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
	if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
		return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
	}
-	s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
+	s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)

	if err := s.setHead(&head{
		genesisBlkRoot,
@@ -514,6 +467,57 @@ func (s *Service) removeStartupState() {
	s.cfg.FinalizedStateAtStartUp = nil
}
+// updateCustodyInfoInDB updates the custody information in the database.
+// It returns the earliest available slot and the (potentially updated) custody group count.
+func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
+	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
+
+	beaconConfig := params.BeaconConfig()
+	custodyRequirement := beaconConfig.CustodyRequirement
+
+	// Check if the node was previously subscribed to all data subnets, and if so,
+	// store the new status accordingly.
+	wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
+	if err != nil {
+		log.WithError(err).Error("Could not update subscription status to all data subnets")
+	}
+
+	// Warn the user if the node was previously subscribed to all data subnets and no longer is.
+	if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
+		log.Warnf(
+			"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
+			flags.SubscribeAllDataSubnets.Name,
+		)
+	}
+
+	// Compute the custody group count.
+	custodyGroupCount := custodyRequirement
+	if isSubscribedToAllDataSubnets {
+		custodyGroupCount = beaconConfig.NumberOfColumns
+	}
+
+	// Safely compute the fulu fork slot.
+	fuluForkSlot, err := fuluForkSlot()
+	if err != nil {
+		return 0, 0, errors.Wrap(err, "fulu fork slot")
+	}
+
+	// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
+	if slot < fuluForkSlot {
+		slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
+		if err != nil {
+			return 0, 0, errors.Wrap(err, "earliest slot")
+		}
+	}
+
+	earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
+	if err != nil {
+		return 0, 0, errors.Wrap(err, "update custody info")
+	}
+
+	return earliestAvailableSlot, custodyGroupCount, nil
+}
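Sketch of the custody group count selection above, with illustrative constants standing in for the params.BeaconConfig() values (the real numbers are network configuration, not the ones below):

package main

import "fmt"

// Illustrative constants; the real values come from the beacon chain config.
const (
	custodyRequirement = 4
	numberOfColumns    = 128
)

// custodyGroupCount mirrors the selection above: a node subscribed to all
// data subnets custodies every column group, everyone else the minimum.
func custodyGroupCount(subscribeAll bool) uint64 {
	if subscribeAll {
		return numberOfColumns
	}
	return custodyRequirement
}

func main() {
	fmt.Println(custodyGroupCount(false)) // 4
	fmt.Println(custodyGroupCount(true))  // 128
}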

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
	currentTime := prysmTime.Now()
	if currentTime.After(genesisTime) {
@@ -530,3 +534,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
	}
	go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

+func fuluForkSlot() (primitives.Slot, error) {
+	beaconConfig := params.BeaconConfig()
+
+	fuluForkEpoch := beaconConfig.FuluForkEpoch
+	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
+		return beaconConfig.FarFutureSlot, nil
+	}
+
+	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
+	if err != nil {
+		return 0, errors.Wrap(err, "epoch start")
+	}
+
+	return forkFuluSlot, nil
+}
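fuluForkSlot above checks the far-future sentinel before converting the fork epoch to a slot. The same logic with illustrative constants in place of the config values (overflow handling, which the real EpochStart's error path covers, is elided here):

package main

import (
	"fmt"
	"math"
)

// Illustrative constants; the real ones live in the beacon chain config.
const (
	slotsPerEpoch  = 32
	farFutureEpoch = math.MaxUint64
	farFutureSlot  = math.MaxUint64
)

// forkSlot mirrors fuluForkSlot: a fork scheduled at the far-future epoch
// never activates; otherwise the fork slot is the first slot of that epoch.
func forkSlot(forkEpoch uint64) uint64 {
	if forkEpoch == farFutureEpoch {
		return farFutureSlot
	}
	return forkEpoch * slotsPerEpoch
}

func main() {
	fmt.Println(forkSlot(100))            // 3200
	fmt.Println(forkSlot(farFutureEpoch)) // 18446744073709551615
}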
@@ -4,7 +4,6 @@ import (
	"io"
	"testing"

-	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -17,10 +16,7 @@ func init() {
}

func TestChainService_SaveHead_DataRace(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-	s := &Service{
-		cfg: &config{BeaconDB: beaconDB},
-	}
+	s := testServiceWithDB(t)
	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
	st, _ := util.DeterministicGenesisState(t, 1)
	require.NoError(t, err)
@@ -31,6 +31,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/container/trie"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		srv.Stop()
	})
	bState, _ := util.DeterministicGenesisState(t, 10)
+	genesis.StoreStateDuringTest(t, bState)
	pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
	require.NoError(t, err)
	mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		DepositContainers: []*ethpb.DepositContainer{},
	})
	require.NoError(t, err)

+	depositCache, err := depositsnapshot.New()
+	require.NoError(t, err)
+
	web3Service, err = execution.NewService(
		ctx,
		execution.WithDatabase(beaconDB),
		execution.WithHttpEndpoint(endpoint),
		execution.WithDepositContractAddress(common.Address{}),
+		execution.WithDepositCache(depositCache),
	)
	require.NoError(t, err, "Unable to set up web3 service")

	attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
	require.NoError(t, err)

-	depositCache, err := depositsnapshot.New()
-	require.NoError(t, err)
-
	fc := doublylinkedtree.New()
	stateGen := stategen.New(beaconDB, fc)
	// Save a state in stategen for purposes of testing a service stop / shutdown.
@@ -127,7 +131,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
	util.SaveBlock(t, ctx, beaconDB, genesisBlk)
	s, err := util.NewBeaconState()
	require.NoError(t, err)
-	require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
+	require.NoError(t, s.SetGenesisTime(gt))
	require.NoError(t, s.SetSlot(1))
	require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -164,7 +168,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
	wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
	s, err := util.NewBeaconState()
	require.NoError(t, err)
-	require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
+	require.NoError(t, s.SetGenesisTime(gt))
	require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -238,7 +242,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
	util.SaveBlock(t, ctx, beaconDB, genesisBlk)
	s, err := util.NewBeaconState()
	require.NoError(t, err)
-	require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
+	require.NoError(t, s.SetGenesisTime(gt))
	require.NoError(t, s.SetSlot(0))
	require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -345,12 +349,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}

func TestChainService_SaveHeadNoDB(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
-	fc := doublylinkedtree.New()
-	s := &Service{
-		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
-	}
+	s := testServiceWithDB(t)
	blk := util.NewBeaconBlock()
	blk.Block.Slot = 1
	r, err := blk.HashTreeRoot()
@@ -371,10 +371,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {

func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
	ctx := t.Context()
-	beaconDB := testDB.SetupDB(t)
-	s := &Service{
-		cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
-	}
+	s := testServiceWithDB(t)
	b := util.NewBeaconBlock()
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
@@ -391,48 +388,21 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
}

func TestServiceStop_SaveCachedBlocks(t *testing.T) {
-	ctx, cancel := context.WithCancel(t.Context())
-	beaconDB := testDB.SetupDB(t)
-	s := &Service{
-		cfg:            &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
-		ctx:            ctx,
-		cancel:         cancel,
-		initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
-	}
+	s := testServiceWithDB(t)
+	s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
	bb := util.NewBeaconBlock()
	r, err := bb.Block.HashTreeRoot()
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
	require.NoError(t, err)
-	require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
+	require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
	require.NoError(t, s.Stop())
-	require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
+	require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}

-func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
-	ctx := t.Context()
-	beaconDB := testDB.SetupDB(t)
-	service := setupBeaconChain(t, beaconDB)
-	mgs := &MockClockSetter{}
-	service.clockSetter = mgs
-	gt := time.Now()
-	service.onExecutionChainStart(t.Context(), gt)
-	gs, err := beaconDB.GenesisState(ctx)
-	require.NoError(t, err)
-	require.NotEqual(t, nil, gs)
-	require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
-	var zero [32]byte
-	require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
-	require.Equal(t, gt, mgs.G.GenesisTime())
-	require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
-}
func BenchmarkHasBlockDB(b *testing.B) {
-	beaconDB := testDB.SetupDB(b)
	ctx := b.Context()
-	s := &Service{
-		cfg: &config{BeaconDB: beaconDB},
-	}
+	s := testServiceWithDB(b)
	blk := util.NewBeaconBlock()
	wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
	require.NoError(b, err)
@@ -448,10 +418,7 @@ func BenchmarkHasBlockDB(b *testing.B) {

func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
	ctx := b.Context()
-	beaconDB := testDB.SetupDB(b)
-	s := &Service{
-		cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
-	}
+	s := testServiceWithDB(b)
	blk := util.NewBeaconBlock()
	r, err := blk.Block.HashTreeRoot()
	require.NoError(b, err)
@@ -4,6 +4,7 @@ import (
	"bytes"
	"context"
	"fmt"
+	"slices"

	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -20,7 +21,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
		return errors.Wrap(err, "could not set up forkchoice checkpoints")
	}
	if err := s.setupForkchoiceTree(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice root")
+		return errors.Wrap(err, "could not set up forkchoice tree")
	}
	if err := s.initializeHead(s.ctx, st); err != nil {
		return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +31,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {

func (s *Service) startupHeadRoot() [32]byte {
	headStr := features.Get().ForceHead
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
+	jp := s.CurrentJustifiedCheckpt()
+	jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
	if headStr == "" {
-		return fRoot
+		return jRoot
	}
	if headStr == "head" {
		root, err := s.cfg.BeaconDB.HeadBlockRoot()
		if err != nil {
-			log.WithError(err).Error("could not get head block root, starting with finalized block as head")
-			return fRoot
+			log.WithError(err).Error("Could not get head block root, starting with justified block as head")
+			return jRoot
		}
		log.Infof("Using Head root of %#x", root)
		return root
	}
	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
	if err != nil {
-		log.WithError(err).Error("could not parse head root, starting with finalized block as head")
-		return fRoot
+		log.WithError(err).Error("Could not parse head root, starting with justified block as head")
+		return jRoot
	}
	return [32]byte(root)
}
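startupHeadRoot above resolves the force-head override through a fallback ladder: empty flag keeps the justified root, "head" asks the DB for the stored head, and anything else must parse as a 32-byte hex root. A condensed, hypothetical sketch of that selection (0x-prefix handling and logging omitted; not Prysm's actual helper):

package main

import (
	"encoding/hex"
	"fmt"
)

// chooseStartupHead mirrors the fallback ladder: every failure path degrades
// gracefully to the justified root.
func chooseStartupHead(flag string, justified [32]byte, dbHead func() ([32]byte, error)) [32]byte {
	switch {
	case flag == "":
		return justified
	case flag == "head":
		root, err := dbHead()
		if err != nil {
			return justified // fall back, as the service does
		}
		return root
	default:
		b, err := hex.DecodeString(flag)
		if err != nil || len(b) != 32 {
			return justified
		}
		var root [32]byte
		copy(root[:], b)
		return root
	}
}

func main() {
	justified := [32]byte{0xaa}
	fmt.Printf("%x\n", chooseStartupHead("", justified, nil)[0])   // aa
	fmt.Printf("%x\n", chooseStartupHead("zz", justified, nil)[0]) // aa: bad hex falls back
}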
@@ -64,16 +65,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
	}
	blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
	if err != nil {
-		log.WithError(err).Error("could not get head block, starting with finalized block as head")
+		log.WithError(err).Error("Could not get head block, starting with finalized block as head")
		return nil
	}
	if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
-		log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
+		log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
		return nil
	}
	chain, err := s.buildForkchoiceChain(s.ctx, blk)
	if err != nil {
-		log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
+		log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
		return nil
	}
	s.cfg.ForkChoiceStore.Lock()
@@ -112,6 +113,7 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
			return nil, errors.New("head block is not a descendant of the finalized checkpoint")
		}
	}
+	slices.Reverse(chain)
	return chain, nil
}
@@ -170,6 +172,6 @@ func (s *Service) setupForkchoiceCheckpoints() error {
		Root: fRoot}); err != nil {
		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
	}
-	s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
+	s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
	return nil
}
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
		})
		defer resetCfg()
		require.Equal(t, service.startupHeadRoot(), gr)
-		require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
+		require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
	})

	st, _ := util.DeterministicGenesisState(t, 64)
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
	require.NotEqual(t, fRoot, root)
	require.Equal(t, root, service.startupHeadRoot())
	require.NoError(t, service.setupForkchoiceTree(st))
-	require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
+	require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
}
@@ -4,29 +4,33 @@ import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/async/event"
	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
+	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/libp2p/go-libp2p/core/peer"
	"google.golang.org/protobuf/proto"
)
@@ -51,6 +55,7 @@ type mockBroadcaster struct {

type mockAccessor struct {
	mockBroadcaster
+	mockCustodyManager
	p2pTesting.MockPeerManager
}

@@ -84,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
	return nil
}

-func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
	mb.broadcastCalled = true
	return nil
}
@@ -94,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
+// mockCustodyManager is a mock implementation of p2p.CustodyManager.
+type mockCustodyManager struct {
+	mut                   sync.RWMutex
+	earliestAvailableSlot primitives.Slot
+	custodyGroupCount     uint64
+}
+
+func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
+	dch.mut.RLock()
+	defer dch.mut.RUnlock()
+
+	return dch.earliestAvailableSlot, nil
+}
+
+func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
+	dch.mut.RLock()
+	defer dch.mut.RUnlock()
+
+	return dch.custodyGroupCount, nil
+}
+
+func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
+	dch.mut.Lock()
+	defer dch.mut.Unlock()
+
+	dch.earliestAvailableSlot = earliestAvailableSlot
+	dch.custodyGroupCount = custodyGroupCount
+
+	return earliestAvailableSlot, custodyGroupCount, nil
+}
+
+func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
+	return 0
+}
+
+var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
type testServiceRequirements struct {
	ctx context.Context
	db  db.Database
@@ -109,8 +151,10 @@ type testServiceRequirements struct {

func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
	ctx := t.Context()
+	genesis := time.Now().Add(-1 * 4 * time.Duration(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second) // Genesis was 4 epochs ago.
	beaconDB := testDB.SetupDB(t)
	fcs := doublylinkedtree.New()
+	fcs.SetGenesisTime(genesis)
	sg := stategen.New(beaconDB, fcs)
	notif := &mockBeaconNode{}
	fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
@@ -149,6 +193,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
		WithExecutionEngineCaller(&mockExecution.EngineClient{}),
		WithP2PBroadcaster(&mockAccessor{}),
		WithLightClientStore(&lightclient.Store{}),
+		WithGenesisTime(genesis),
	}
	// append the variadic opts so they override the defaults by being processed afterwards
	opts = append(defOpts, opts...)
@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
	ojc := &ethpb.Checkpoint{}
	st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
	if err != nil {
-		logrus.WithError(err).Error("could not update head")
+		logrus.WithError(err).Error("Could not update head")
	}
	err = s.ForkChoiceStore.InsertNode(ctx, st, root)
	if err != nil {
-		logrus.WithError(err).Error("could not insert node to forkchoice")
+		logrus.WithError(err).Error("Could not insert node to forkchoice")
	}
}
@@ -641,7 +641,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
}

// SetForkChoiceGenesisTime mocks the same method in the chain service.
-func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
+func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
	if s.ForkChoiceStore != nil {
		s.ForkChoiceStore.SetGenesisTime(timestamp)
	}
@@ -723,7 +723,8 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
}

// ReceiveDataColumns implements the same method in chain service.
-func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
+func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
+	c.DataColumns = append(c.DataColumns, dcs...)
	return nil
}
@@ -3,8 +3,6 @@ package blockchain
import (
	"testing"

-	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
-	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -17,11 +15,8 @@ import (
)

func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
	b := util.NewBeaconBlock()
	b.Block.Slot = 1792480
-	util.SaveBlock(t, t.Context(), beaconDB, b)
	r, err := b.Block.HashTreeRoot()
	require.NoError(t, err)

@@ -68,15 +63,15 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
+			s := testServiceWithDB(t)
+			beaconDB := s.cfg.BeaconDB
+			util.SaveBlock(t, t.Context(), beaconDB, b)
			wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
-			require.Equal(t, !tt.disabled, wv.enabled)
			require.NoError(t, err)
-			fcs := doublylinkedtree.New()
-			s := &Service{
-				cfg:        &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
-				wsVerifier: wv,
-			}
-			require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
+			s.cfg.WeakSubjectivityCheckpt = tt.checkpt
+			s.wsVerifier = wv
+			require.Equal(t, !tt.disabled, wv.enabled)
+			require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
			cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
			err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
			if tt.wantErr == nil {
|
||||
|
||||
@@ -24,7 +24,8 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -68,7 +69,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
log.WithError(err).Error("Failed to check builder status")
} else {
log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
}
@@ -87,7 +88,7 @@ func (s *Service) Stop() error {
}

// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
return s.c.SubmitBlindedBlock(ctx, b)
}

// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
// After Fulu, relays only return status codes (no payload).
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
defer span.End()
start := time.Now()
defer func() {
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return ErrNoBuilder
}

return s.c.SubmitBlindedBlockPostFulu(ctx, b)
}
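A hedged sketch of a call site choosing between the two submission paths; the fork check and the surrounding types are simplified stand-ins, not Prysm's proposer code:

package main

import (
	"context"
	"errors"
	"fmt"
)

// relay is a pared-down stand-in for the BlockBuilder interface above.
type relay interface {
	SubmitBlindedBlock(ctx context.Context) (payload []byte, err error)
	SubmitBlindedBlockPostFulu(ctx context.Context) error
}

// submit picks the post-Fulu path when the slot is past the fork: the relay
// answers with a status only, so the caller must already hold everything it
// needs to publish the block and its blob sidecars.
func submit(ctx context.Context, r relay, postFulu bool) error {
	if postFulu {
		return r.SubmitBlindedBlockPostFulu(ctx)
	}
	payload, err := r.SubmitBlindedBlock(ctx)
	if err != nil {
		return err
	}
	if len(payload) == 0 {
		return errors.New("relay returned an empty payload")
	}
	fmt.Println("unblinded payload of", len(payload), "bytes")
	return nil
}

type stubRelay struct{}

func (stubRelay) SubmitBlindedBlock(context.Context) ([]byte, error) { return []byte{1}, nil }
func (stubRelay) SubmitBlindedBlockPostFulu(context.Context) error   { return nil }

func main() {
	fmt.Println(submit(context.Background(), stubRelay{}, true))  // <nil>
	fmt.Println(submit(context.Background(), stubRelay{}, false)) // payload size, then <nil>
}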
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")

@@ -24,20 +24,22 @@ type Config struct {

// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
BlobBundleV2 *v1.BlobsBundleV2
ErrSubmitBlindedBlock error
ErrSubmitBlindedBlockPostFulu error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
}

// Configured for mocking.
@@ -46,7 +48,7 @@ func (s *MockBuilderService) Configured() bool {
}

// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
switch b.Version() {
case version.Bellatrix:
w, err := blocks.WrappedExecutionPayload(s.Payload)
@@ -66,6 +68,16 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
case version.Fulu:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload for fulu")
}
// For Fulu, return BlobsBundleV2 if available, otherwise the regular BlobsBundle.
if s.BlobBundleV2 != nil {
return w, s.BlobBundleV2, s.ErrSubmitBlindedBlock
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
default:
return nil, nil, errors.New("unknown block version for mocking")
}
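The Fulu branch of the mock prefers BlobBundleV2 and falls back to the V1 bundle. One subtlety worth a sketch: the nil check happens on the concrete pointer before it is converted to the interface, which sidesteps Go's typed-nil trap. Stand-in types below, not the real v1 protos:

package main

import "fmt"

type bundler interface{ version() int }

type bundleV1 struct{}
type bundleV2 struct{}

func (*bundleV1) version() int { return 1 }
func (*bundleV2) version() int { return 2 }

// pick mirrors the mock's fallback: check the concrete field for nil first,
// then return it through the interface.
func pick(v1b *bundleV1, v2b *bundleV2) bundler {
	if v2b != nil {
		return v2b
	}
	return v1b
}

func main() {
	fmt.Println(pick(&bundleV1{}, nil).version())         // 1
	fmt.Println(pick(&bundleV1{}, &bundleV2{}).version()) // 2
}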
@@ -104,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator
}

// SubmitBlindedBlockPostFulu for mocking.
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return s.ErrSubmitBlindedBlockPostFulu
}
beacon-chain/cache/sync_committee.go
@@ -67,6 +67,30 @@ func (s *SyncCommitteeCache) Clear() {
s.cache = cache.NewFIFO(keyFn)
}

// CurrentPeriodPositions returns the current period positions of validator indices with respect to the
// sync committee. If any input validator index has no assignment, an empty list will be returned
// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manually checking the state for the index position is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()

pos, err := s.positionsInCommittee(root, indices)
if err != nil {
return nil, err
}
result := make([][]primitives.CommitteeIndex, len(pos))
for i, p := range pos {
if p == nil {
result[i] = []primitives.CommitteeIndex{}
} else {
result[i] = p.currentPeriod
}
}

return result, nil
}
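A hedged usage sketch of the new batch API: resolve all indices in one locked pass, and fall back to the state when the root was never cached. Self-contained emulation, not Prysm code:

package main

import (
	"errors"
	"fmt"
)

var errNonExistingKey = errors.New("non-existing sync committee key")

// lookup emulates CurrentPeriodPositions: a nil map plays the uncached root.
func lookup(cached map[uint64][]int, indices []uint64) ([][]int, error) {
	if cached == nil {
		return nil, errNonExistingKey
	}
	out := make([][]int, len(indices))
	for i, idx := range indices {
		out[i] = cached[idx] // nil/empty when the validator has no assignment
	}
	return out, nil
}

func main() {
	c := map[uint64][]int{42: {0, 511}}
	pos, err := lookup(c, []uint64{42, 7})
	fmt.Println(pos, err) // [[0 511] []] <nil>
	if _, err := lookup(nil, []uint64{42}); errors.Is(err, errNonExistingKey) {
		fmt.Println("fall back to reading positions from the state")
	}
}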
// CurrentPeriodIndexPosition returns the current period index position of a validator index with respect to the
// sync committee. If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
@@ -104,11 +128,7 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
return pos.nextPeriod, nil
}

// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of a validator index to its position(s) in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
obj, exists, err := s.cache.GetByKey(key(root))
if err != nil {
return nil, err
@@ -121,13 +141,33 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
if !ok {
return nil, errNotSyncCommitteeIndexPosition
}
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
if !ok {
SyncCommitteeCacheMiss.Inc()
result := make([]*positionInCommittee, len(indices))
for i, idx := range indices {
idxInCommittee, ok := item.vIndexToPositionMap[idx]
if ok {
SyncCommitteeCacheHit.Inc()
result[i] = idxInCommittee
} else {
SyncCommitteeCacheMiss.Inc()
result[i] = nil
}
}
return result, nil
}

// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of a validator index to its position(s) in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
if err != nil {
return nil, err
}
if len(positions) == 0 {
return nil, nil
}
SyncCommitteeCacheHit.Inc()
return idxInCommittee, nil
return positions[0], nil
}
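The refactor funnels the single-index lookup through the batch helper, so map access and hit/miss accounting live in one place. A minimal sketch of that delegation pattern (types invented for illustration):

package main

import "fmt"

type position struct{ current []int }

// batch is the one place that touches the underlying map; nil marks a miss.
func batch(m map[uint64]*position, idxs []uint64) []*position {
	out := make([]*position, len(idxs))
	for i, idx := range idxs {
		out[i] = m[idx]
	}
	return out
}

// single is a thin wrapper, as idxPositionInCommittee now is.
func single(m map[uint64]*position, idx uint64) *position {
	return batch(m, []uint64{idx})[0]
}

func main() {
	m := map[uint64]*position{7: {current: []int{3, 12}}}
	fmt.Println(single(m, 7).current) // [3 12]
	fmt.Println(single(m, 9) == nil)  // true
}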
// UpdatePositionsInCommittee updates caching of validators' positions in the sync committee with respect to
@@ -178,7 +218,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("cache rotated during async committee update operation - abandoning cache update")
log.Warn("Cache rotated during async committee update operation - abandoning cache update")
return nil
}
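The clearCount comparison is a generation-counter guard: the updater snapshots the counter before its slow, unlocked work and abandons the write if Clear ran in the meantime. A self-contained sketch of the idea (semantics assumed from the snippet above):

package main

import (
	"fmt"
	"sync/atomic"
)

type genCache struct {
	cleared atomic.Uint64
	data    map[string]int
}

func (c *genCache) clear() {
	c.cleared.Add(1)
	c.data = map[string]int{}
}

// update abandons its result if the cache rotated while it was computing.
func (c *genCache) update(key string, compute func() int) {
	before := c.cleared.Load()
	v := compute() // slow work, done without holding any lock
	if before != c.cleared.Load() {
		fmt.Println("cache rotated during update - abandoning")
		return
	}
	c.data[key] = v
}

func main() {
	c := &genCache{data: map[string]int{}}
	c.update("a", func() int { c.clear(); return 1 }) // simulate a concurrent Clear
	fmt.Println(len(c.data))                          // 0
}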
@@ -16,6 +16,11 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}

// CurrentPeriodPositions -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
return nil, nil
}

// CurrentPeriodIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
return nil, nil
@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
valIdx: 2,
st: genState(1),
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 32eth",
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
valIdx: 2,
activeBalance: 1,
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 1",

@@ -217,15 +217,15 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
// ValidateSyncMessageTime validates a sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(slot, genesisTime); err != nil {
return err
}
messageTime, err := slots.ToTime(uint64(genesisTime.Unix()), slot)
messageTime, err := slots.StartTime(genesisTime, slot)
if err != nil {
return err
}
currentSlot := slots.Since(genesisTime)
slotStartTime, err := slots.ToTime(uint64(genesisTime.Unix()), currentSlot)
currentSlot := slots.CurrentSlot(genesisTime)
slotStartTime, err := slots.StartTime(genesisTime, currentSlot)
if err != nil {
return err
}
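The slot helpers now take a time.Time genesis directly (slots.StartTime, slots.CurrentSlot) instead of raw Unix seconds. A self-contained sketch of the disparity window such a validator enforces; the 12-second slot and the formulas are assumptions for illustration:

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

func slotStart(genesis time.Time, slot uint64) time.Time {
	return genesis.Add(time.Duration(slot) * secondsPerSlot * time.Second)
}

func currentSlot(genesis time.Time) uint64 {
	return uint64(time.Since(genesis) / (secondsPerSlot * time.Second))
}

// validateMessageSlot accepts a message only when its slot's start time lies
// within the clock-disparity allowance of the current slot's start time.
func validateMessageSlot(genesis time.Time, msgSlot uint64, disparity time.Duration) error {
	msgTime := slotStart(genesis, msgSlot)
	curTime := slotStart(genesis, currentSlot(genesis))
	if msgTime.After(curTime.Add(disparity)) || msgTime.Before(curTime.Add(-disparity)) {
		return fmt.Errorf("sync message slot %d outside current slot window", msgSlot)
	}
	return nil
}

func main() {
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second)
	fmt.Println(validateMessageSlot(genesis, currentSlot(genesis), 500*time.Millisecond)) // <nil>
	fmt.Println(validateMessageSlot(genesis, 1, 500*time.Millisecond))                    // error
}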
@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon

numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
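The state accessor now returns a time.Time while the protobuf field stays a uint64 of Unix seconds, hence the explicit conversion during each state upgrade. A round-trip sketch (the genesis value is mainnet's, used only as an example):

package main

import (
	"fmt"
	"time"
)

func main() {
	genesis := time.Unix(1606824023, 0).UTC() // mainnet genesis, for illustration
	secs := uint64(genesis.Unix())            // stored in the proto field
	back := time.Unix(int64(secs), 0).UTC()   // reconstructed by the accessor
	fmt.Println(secs, back.Equal(genesis))    // 1606824023 true
}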
@@ -41,7 +41,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -5,6 +5,7 @@ import (
"sort"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
@@ -39,11 +40,11 @@ func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []ethpb.AttSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
if err != nil {
return nil, err
}
@@ -56,7 +57,7 @@ func ProcessAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
@@ -75,10 +76,9 @@ func ProcessAttesterSlashing(
return nil, err
}
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
}
slashedAny = true
}
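Instead of injecting a slashValidatorFunc, callers now build a *validators.ExitInfo once and thread it through every slashing, so exit-churn bookkeeping is accumulated rather than recomputed per operation. A sketch of that accumulator pattern; the HighestExitEpoch field name follows the tests below, everything else is simplified and assumed:

package main

import "fmt"

type exitInfo struct {
	HighestExitEpoch uint64
	churn            uint64 // exits already scheduled at that epoch
}

// exitInformation mirrors scanning the registry once, up front.
func exitInformation(exitEpochs []uint64) *exitInfo {
	info := &exitInfo{}
	for _, e := range exitEpochs {
		switch {
		case e > info.HighestExitEpoch:
			info.HighestExitEpoch, info.churn = e, 1
		case e == info.HighestExitEpoch:
			info.churn++
		}
	}
	return info
}

// initiateExit schedules one more exit, spilling into the next epoch when the
// per-epoch churn limit is already full.
func initiateExit(info *exitInfo, churnLimit uint64) uint64 {
	if info.churn >= churnLimit {
		info.HighestExitEpoch++
		info.churn = 0
	}
	info.churn++
	return info.HighestExitEpoch
}

func main() {
	info := exitInformation([]uint64{5, 5, 3})
	fmt.Println(initiateExit(info, 2)) // 6: epoch 5 was already at the limit
	fmt.Println(initiateExit(info, 2)) // 6: one slot of churn still free
}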
@@ -4,6 +4,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -44,11 +45,10 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
Target: &ethpb.Checkpoint{Epoch: 1}},
})}}

var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)

beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Validators: []*ethpb.Validator{{}},
Slot: currentSlot,
})
require.NoError(t, err)
@@ -62,16 +62,15 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
assert.ErrorContains(t, "attestations are not slashable", err)
}

func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)

beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Validators: []*ethpb.Validator{{}},
Slot: currentSlot,
})
require.NoError(t, err)
@@ -101,7 +100,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}

@@ -243,7 +242,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, tc.st.SetSlot(currentSlot))

newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
require.NoError(t, err)
newRegistry := newState.Validators()

@@ -265,3 +264,83 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
})
}
}

func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
st, keys := util.DeterministicGenesisStateElectra(t, 8)
bal, err := helpers.TotalActiveBalance(st)
require.NoError(t, err)
perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
vals := st.Validators()

// We set the total effective balance of slashed validators
// higher than the churn limit for a single epoch.
vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
require.NoError(t, st.SetValidators(vals))

sl1att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
sl1att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{0, 1},
})
slashing1 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl1att1,
Attestation_2: sl1att2,
}
sl2att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{2, 3},
})
sl2att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{2, 3},
})
slashing2 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl2att1,
Attestation_2: sl2att2,
}

domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)

signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := keys[0].Sign(signingRoot[:])
sig1 := keys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att1.Signature = aggregateSig.Marshal()

signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[0].Sign(signingRoot[:])
sig1 = keys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att2.Signature = aggregateSig.Marshal()

signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att1.Signature = aggregateSig.Marshal()

signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att2.Signature = aggregateSig.Marshal()

exitInfo := v.ExitInformation(st)
assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
_, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
require.NoError(t, err)
assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
}
@@ -119,6 +119,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
require.NoError(t, err)
_, err = Eth1DataHasEnoughSupport(s, eth1data)
_ = err
fuzz.FreeMemory(i)
}

}
@@ -190,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
}
@@ -223,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -319,6 +320,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
require.NoError(t, err)
err = VerifyDeposit(s, deposit)
_ = err
fuzz.FreeMemory(i)
}
}

@@ -332,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
}
@@ -349,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
}
@@ -382,5 +384,6 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
_ = err
err = VerifyExitAndSignature(val, s, ve)
_ = err
fuzz.FreeMemory(i)
}
}

@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
require.NoError(t, err)
newRegistry := newState.Validators()
if !newRegistry[expectedSlashedVal].Slashed {

@@ -9,7 +9,6 @@ import (
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -50,13 +49,12 @@ func ProcessVoluntaryExits(
ctx context.Context,
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
// Avoid calculating the epoch churn if no exits exist.
if len(exits) == 0 {
return beaconState, nil
}
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -68,15 +66,8 @@ func ProcessVoluntaryExits(
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
return nil, err
}
}

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -46,7 +47,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
}

want := "validator has not been active long enough to exit"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
assert.ErrorContains(t, want, err)
}

@@ -76,7 +77,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
}

want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
assert.ErrorContains(t, want, err)
}

@@ -124,7 +125,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
},
}

newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
require.NoError(t, err, "Could not process exits")
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {

@@ -162,7 +162,7 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
if !bytes.Equal(payload.PrevRandao(), random) {
return ErrInvalidPayloadPrevRandao
}
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
t, err := slots.StartTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}

@@ -501,7 +501,7 @@ func Test_ValidatePayload(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
@@ -551,7 +551,7 @@ func Test_ProcessPayload(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
@@ -627,7 +627,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
@@ -697,7 +697,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
ts, err := slots.StartTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
@@ -7,9 +7,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -19,11 +19,6 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")

type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)

// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
@@ -54,11 +49,11 @@ func ProcessProposerSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []*ethpb.ProposerSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
if err != nil {
return nil, err
}
@@ -71,7 +66,7 @@ func ProcessProposerSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
if slashing == nil {
@@ -80,7 +75,7 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}

@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
},
}
want := "mismatched header slots"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
},
}
want := "expected slashing headers to differ"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
"validator with key %#x is not slashable",
bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -101,7 +100,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}
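params.Fork replaces the forks-package helper here; both resolve the fork in force at a given epoch from the configured schedule. A self-contained sketch of that lookup (schedule contents are illustrative, though the epochs happen to match mainnet's Altair and Bellatrix activations):

package main

import (
	"fmt"
	"sort"
)

type fork struct {
	epoch   uint64
	version [4]byte
}

// forkAt returns the last fork whose activation epoch is <= the query epoch.
func forkAt(schedule []fork, epoch uint64) fork {
	sort.Slice(schedule, func(i, j int) bool { return schedule[i].epoch < schedule[j].epoch })
	cur := schedule[0]
	for _, f := range schedule {
		if f.epoch <= epoch {
			cur = f
		}
	}
	return cur
}

func main() {
	schedule := []fork{
		{0, [4]byte{0}},      // phase0
		{74240, [4]byte{1}},  // altair
		{144896, [4]byte{2}}, // bellatrix
	}
	fmt.Println(forkAt(schedule, 100000).version) // [1 0 0 0]
}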
@@ -43,7 +43,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
}

s := &ethpb.BeaconStateCapella{
GenesisTime: state.GenesisTime(),
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{

@@ -59,7 +59,7 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
}

s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{

@@ -194,11 +194,11 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if IsValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("failed to find source validator index")
log.Error("Failed to find source validator index")
continue
}
if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("failed to switch to compounding validator")
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}
@@ -280,7 +280,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
}
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("failed to fetch pending balance to withdraw")
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if bal > 0 {
@@ -290,7 +290,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
// Initiate the exit of the source validator.
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("failed to compute consolidation epoch")
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch

@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
// Handle validator ejections.
for _, idx := range eligibleForEjection {
var err error
// exitQueueEpoch and churn arguments are not used in electra.
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
// exit info is not used in electra
st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
}
@@ -4,6 +4,7 @@ import (
"context"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -46,18 +47,21 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

func ProcessOperations(
ctx context.Context,
st state.BeaconState,
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -68,7 +72,7 @@ func ProcessOperations(
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
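ProcessOperations now builds the exit accumulator once and hands the same instance to proposer slashings, attester slashings, and voluntary exits. A toy sketch of why the sharing matters: the scheduled exit epoch stays consistent across operation types (all numbers invented):

package main

import "fmt"

type exitInfo struct {
	HighestExitEpoch uint64
	churn, limit     uint64
}

func (e *exitInfo) schedule() uint64 {
	if e.churn >= e.limit {
		e.HighestExitEpoch++
		e.churn = 0
	}
	e.churn++
	return e.HighestExitEpoch
}

func main() {
	// One accumulator per block: every exit-producing operation sees the
	// same view of the exit queue, so epochs advance consistently.
	info := &exitInfo{HighestExitEpoch: 10, churn: 1, limit: 2}
	fmt.Println(info.schedule()) // 10: proposer slashing fills the epoch
	fmt.Println(info.schedule()) // 11: attester slashing spills over
	fmt.Println(info.schedule()) // 11: voluntary exit lands in the new epoch
}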
@@ -208,7 +208,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
}

s := &ethpb.BeaconStateElectra{
GenesisTime: beaconState.GenesisTime(),
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{

@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
return nil, err
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
// If the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included.
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
continue
}

vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
if !exists {
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
continue
}
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
wc := validator.GetWithdrawalCredentials()
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
if !hasCorrectCredential || !isCorrectSourceAddress {
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
continue
}

// Verify the validator is active.
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
log.Debugln("Skipping execution layer withdrawal request, validator not active")
log.Debug("Skipping execution layer withdrawal request, validator not active")
continue
}
// Verify the validator has not yet submitted an exit.
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
continue
}
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
continue
}

@@ -147,9 +147,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
if isFullExitRequest {
// Only exit the validator if it has no pending withdrawals in the queue.
if pendingBalanceToWithdraw == 0 {
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
var err error
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
if err != nil {
return nil, err
}
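The full-exit path above only fires after a series of guards. A hedged sketch condensing those checks into one predicate, with simplified types and the 0x01/0x02 credential comparison reduced to a boolean (the real logic also verifies the source address bytes):

package main

import "fmt"

const farFutureEpoch = ^uint64(0)

type validator struct {
	activationEpoch, exitEpoch uint64
	correctCredentials         bool
}

// canFullExit mirrors the gating in ProcessWithdrawalRequests: correct
// credentials, currently active, no exit submitted, active long enough,
// and nothing pending in the partial-withdrawal queue.
func canFullExit(v validator, currentEpoch, shardCommitteePeriod, pendingBalance uint64) bool {
	switch {
	case !v.correctCredentials:
		return false
	case currentEpoch < v.activationEpoch || currentEpoch >= v.exitEpoch: // not active
		return false
	case v.exitEpoch != farFutureEpoch: // exit already submitted
		return false
	case currentEpoch < v.activationEpoch+shardCommitteePeriod: // too young
		return false
	case pendingBalance > 0: // pending partial withdrawals must drain first
		return false
	}
	return true
}

func main() {
	v := validator{activationEpoch: 0, exitEpoch: farFutureEpoch, correctCredentials: true}
	fmt.Println(canFullExit(v, 300, 256, 0)) // true
	fmt.Println(canFullExit(v, 100, 256, 0)) // false: not active long enough
}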
Some files were not shown because too many files have changed in this diff.