Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-22 03:38:02 -05:00)

Compare commits: 76 commits (poc/option ... gRPC-fallb)
Commits (SHA1):
fe7b2bf20e, 7cc6ded31a, 5fd3300fdb, f74a9cb3ec, 3d903d5d75, 5f335b1b58, 1f7f7c6833, 4e952354d1, 5268da43f1, a6fc327cfb,
cf04b457a6, 4586b0accf, 3a71ad2ec1, 588766e520, ecc19bc6ed, 164d2d50fd, a135a336c3, 5f189f002e, 1355a9ff4d, 34478f30c8,
214b4428e6, bca6166e82, b6818853b4, 5a56bfcf98, a08f185170, 15b1d68249, 885d9cc478, 511248213c, db82f3cc9d, 9c874037d1,
1a936e2ffa, 6027518ad5, e4a6bc7065, a2982f0807, 73e9d6e0ce, 2e43d50364, 71e7b526d2, ea8baab7b0, 9bb231fb3b, 6432140603,
5e0a9ff992, 0bfd661baf, b21acc0bbb, f6f65987c6, d26cdd74ee, d1905cb018, 37c5178fa8, 13f8e7b47f, 124eadd56e, 76420d9428,
21366e11ca, 7ed4d496dd, 158c09ca8c, 17245f4fac, 53b0a574ab, c96d188468, 9f828bdd88, 0fcb922702, 3646a77bfb, 1541558261,
1a6252ade4, 27c009e7ff, ffad861e2c, 17413b52ed, a651e7f0ac, 3e1cb45e92, 792fa22099, c5b3d3531c, fc2dcb0e88, cc4510bb77,
888db581dd, f1d2ee72e2, 31f18b9f60, 6462c997e9, 6fa0e9cf5f, 6b5ba5ad01
.github/workflows/check-logs.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
name: Check log.go files
on: [ pull_request ]

jobs:
  check-logs:
    runs-on: ubuntu-4

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go 1.25.1
        uses: actions/setup-go@v5
        with:
          go-version: '1.25.1'

      - name: Install ripgrep
        run: sudo apt-get install -y ripgrep

      - name: Check log.go files
        run: ./hack/check-logs.sh
.gitignore (vendored, 3 lines)
@@ -44,3 +44,6 @@ tmp

# spectest coverage reports
report.txt

# execution client data
execution/
BUILD.bazel (10 lines)
@@ -1,5 +1,4 @@
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
@@ -55,15 +54,6 @@ alias(
    visibility = ["//visibility:public"],
)

gometalinter(
    name = "gometalinter",
    config = "//:.gometalinter.json",
    paths = [
        "./...",
    ],
    prefix = prefix,
)

goimports(
    name = "goimports",
    display_diffs = True,
CHANGELOG.md (60 lines)
@@ -4,6 +4,66 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07

Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).

### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)

### Changed

- The `/eth/v2/beacon/pool/attestations` and `/eth/v1/beacon/pool/sync_committees` endpoints now return a 503 error if the node is still syncing. The REST API now also broadcasts immediately, in the same way as gRPC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)

### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)

## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18

Release highlights:

- Fixed potential deadlock scenario in data column batch verification
- Improved processing and metrics for cells and proofs

We are aware of [an issue](https://github.com/OffchainLabs/prysm/issues/16160) where Prysm struggles to sync from an out-of-sync state. We will have another release before the end of the year to address this issue.

Our postmortem document for the December 4th mainnet issue has been published on our [documentation site](https://prysm.offchainlabs.com/docs/misc/mainnet-postmortems/).

### Added

- Track the dependent root of the latest finalized checkpoint in forkchoice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16103)
- Proposal design document for implementing graffiti. It is currently empty by default, and the idea is to have it take the form GE168dPR63af. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15983)
- Add support for detecting and logging per-address reachability via libp2p AutoNAT v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16100)
- Static analyzer that ensures each `httputil.HandleError` call is followed by a `return` statement. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16134)
- Prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)

### Changed

- Optimise `migratetocold` by removing the brute-force for loop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16101)
- The e2e sync committee evaluator now skips the first slot after startup. We already skip the fork epoch for these checks; this additional skip only applies on startup, because Altair always starts from epoch 0 and validators need time to warm up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16145)
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)

### Removed

- Removed an unnecessary copy from `Eth1DataHasEnoughSupport`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16118)

### Fixed

- Incorrect constructor return type [#16084](https://github.com/OffchainLabs/prysm/pull/16084). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16084)
- Fixed possible race when validating two attestations at the same time. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16105)
- Fix missing return after version header check in SubmitAttesterSlashingsV2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16126)
- Fix deadlock in data column gossip KZG batch verification when a caller times out, preventing result delivery. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16141)
- Fixed a state replay issue in the REST API caused by the attester and sync committee duties endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16136)
- Do not error when the committee has been computed correctly but updating the cache failed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16142)
- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16144)

## [v7.1.0](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.1.0) - 2025-12-10

This release includes several key features and fixes. If you are running v7.0.0, you should update to v7.0.1 or later and remove the flag `--disable-last-epoch-targets`.
@@ -72,7 +72,7 @@ Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should li

To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
(see official docs on [build constraints](https://pkg.go.dev/go/build#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in extra build tag, eg:

```bash

@@ -9,7 +9,7 @@ This README details how to setup Prysm for interop testing for usage with other

## Installation & Setup

1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
1. Install [Bazel](https://bazel.build/install) **(Recommended)**
2. `git clone https://github.com/OffchainLabs/prysm && cd prysm`
3. `bazel build //cmd/...`

@@ -15,7 +15,7 @@

## 📖 Overview

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
This is the core repository for Prysm, a [Golang](https://go.dev/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).

See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

@@ -23,7 +23,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details

## 🚀 Getting Started

A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://prysm.offchainlabs.com/docs/)**.

💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.

@@ -51,7 +51,7 @@ Prysm maintains two permanent branches:

### 🛠 Contribution Guide

Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!
Want to get involved? Check out our **[Contribution Guide](https://prysm.offchainlabs.com/docs/contribute/contribution-guidelines/)** to learn more!

---
WORKSPACE (14 lines)
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0"
consensus_spec_version = "v1.7.0-alpha.1"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
        "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
        "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
    },
    version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -423,10 +423,6 @@ load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")

e2e_deps()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies(go_sdk = "go_sdk")
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "common.go",
        "header.go",
        "log.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/apiutil",
    visibility = ["//visibility:public"],

@@ -5,8 +5,6 @@ import (
    "sort"
    "strconv"
    "strings"

    log "github.com/sirupsen/logrus"
)

type mediaRange struct {
api/apiutil/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package apiutil

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/apiutil")

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package beacon

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "beacon")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/beacon")
@@ -6,6 +6,7 @@ go_library(
        "bid.go",
        "client.go",
        "errors.go",
        "log.go",
        "types.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/client/builder",
@@ -63,6 +64,5 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -25,7 +25,7 @@ import (
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

@@ -70,7 +70,7 @@ type requestLogger struct{}
func (*requestLogger) observe(r *http.Request) (e error) {
    b := bytes.NewBuffer(nil)
    if r.Body == nil {
        log.WithFields(log.Fields{
        log.WithFields(logrus.Fields{
            "bodyBase64": "(nil value)",
            "url":        r.URL.String(),
        }).Info("Builder http request")
@@ -87,7 +87,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
        return err
    }
    r.Body = io.NopCloser(b)
    log.WithFields(log.Fields{
    log.WithFields(logrus.Fields{
        "bodyBase64": string(body),
        "url":        r.URL.String(),
    }).Info("Builder http request")

@@ -23,7 +23,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/util"
    log "github.com/sirupsen/logrus"
)

type roundtrip func(*http.Request) (*http.Response, error)
api/client/builder/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package builder

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/builder")

@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "event_stream.go",
        "log.go",
        "utils.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/client/event",
@@ -23,8 +24,5 @@ go_test(
        "utils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
    deps = ["//testing/require:go_default_library"],
)

@@ -10,7 +10,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/api"
    "github.com/OffchainLabs/prysm/v7/api/client"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

const (

@@ -8,7 +8,6 @@ import (
    "time"

    "github.com/OffchainLabs/prysm/v7/testing/require"
    log "github.com/sirupsen/logrus"
)

func TestNewEventStream(t *testing.T) {
api/client/event/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package event

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/event")

@@ -3,12 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "grpc_connection_provider.go",
        "grpcutils.go",
        "log.go",
        "mock_grpc_provider.go",
        "parameters.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
@@ -17,12 +21,17 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["grpcutils_test.go"],
    srcs = [
        "grpc_connection_provider_test.go",
        "grpcutils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
    ],
)
|
||||
172
api/grpc/grpc_connection_provider.go
Normal file
172
api/grpc/grpc_connection_provider.go
Normal file
@@ -0,0 +1,172 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
pkgErrors "github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// GrpcConnectionProvider manages gRPC connections for failover support.
|
||||
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
|
||||
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
|
||||
type GrpcConnectionProvider interface {
|
||||
// CurrentConn returns the currently active gRPC connection.
|
||||
// The connection is created lazily on first call.
|
||||
// Returns nil if the provider has been closed.
|
||||
CurrentConn() *grpc.ClientConn
|
||||
// CurrentHost returns the address of the currently active endpoint.
|
||||
CurrentHost() string
|
||||
// Hosts returns all configured endpoint addresses.
|
||||
Hosts() []string
|
||||
// SetHost switches to the endpoint at the given index.
|
||||
// The new connection is created lazily on next CurrentConn() call.
|
||||
SetHost(index int) error
|
||||
// Close closes the current connection.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type grpcConnectionProvider struct {
|
||||
// Immutable after construction - no lock needed for reads
|
||||
endpoints []string
|
||||
ctx context.Context
|
||||
dialOpts []grpc.DialOption
|
||||
|
||||
// Current connection state (protected by mu)
|
||||
currentIndex uint64
|
||||
conn *grpc.ClientConn
|
||||
|
||||
mu sync.Mutex
|
||||
closed atomic.Bool
|
||||
}
|
||||
|
||||
// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
|
||||
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
|
||||
// Only one connection is maintained at a time, created lazily on first use.
|
||||
func NewGrpcConnectionProvider(
|
||||
ctx context.Context,
|
||||
endpoint string,
|
||||
dialOpts []grpc.DialOption,
|
||||
) (GrpcConnectionProvider, error) {
|
||||
endpoints := parseEndpoints(endpoint)
|
||||
if len(endpoints) == 0 {
|
||||
return nil, pkgErrors.New("no gRPC endpoints provided")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"endpoints": endpoints,
|
||||
"count": len(endpoints),
|
||||
}).Info("Initialized gRPC connection provider with multiple endpoints")
|
||||
|
||||
return &grpcConnectionProvider{
|
||||
endpoints: endpoints,
|
||||
ctx: ctx,
|
||||
dialOpts: dialOpts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
|
||||
func parseEndpoints(endpoint string) []string {
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
var endpoints []string
|
||||
for p := range strings.SplitSeq(endpoint, ",") {
|
||||
if p = strings.TrimSpace(p); p != "" {
|
||||
endpoints = append(endpoints, p)
|
||||
}
|
||||
}
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
|
||||
if p.closed.Load() {
|
||||
return nil
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// Return existing connection if available
|
||||
if p.conn != nil {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Create connection lazily
|
||||
ep := p.endpoints[p.currentIndex]
|
||||
conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
|
||||
return nil
|
||||
}
|
||||
|
||||
p.conn = conn
|
||||
log.WithField("endpoint", ep).Debug("Created gRPC connection")
|
||||
return conn
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) CurrentHost() string {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
return p.endpoints[p.currentIndex]
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) Hosts() []string {
|
||||
// Return a copy to maintain immutability
|
||||
hosts := make([]string, len(p.endpoints))
|
||||
copy(hosts, p.endpoints)
|
||||
return hosts
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) SetHost(index int) error {
|
||||
if index < 0 || index >= len(p.endpoints) {
|
||||
return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if uint64(index) == p.currentIndex {
|
||||
return nil // Already on this host
|
||||
}
|
||||
|
||||
oldHost := p.endpoints[p.currentIndex]
|
||||
|
||||
// Close existing connection if any
|
||||
if p.conn != nil {
|
||||
if err := p.conn.Close(); err != nil {
|
||||
log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
|
||||
p.currentIndex = uint64(index)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"previousHost": oldHost,
|
||||
"newHost": p.endpoints[index],
|
||||
}).Debug("Switched gRPC endpoint")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) Close() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if p.closed.Load() {
|
||||
return nil
|
||||
}
|
||||
p.closed.Store(true)
|
||||
|
||||
if p.conn != nil {
|
||||
if err := p.conn.Close(); err != nil {
|
||||
return pkgErrors.Wrapf(err, "failed to close connection to %s", p.endpoints[p.currentIndex])
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
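The provider above dials lazily and keeps a single connection, so failover is driven entirely by SetHost. Below is a minimal sketch of how a consumer might use it; the endpoint addresses and the surrounding main function are hypothetical, and only the GrpcConnectionProvider API shown in the diff is relied on.

```go
// Minimal usage sketch for GrpcConnectionProvider (hypothetical consumer code).
package main

import (
    "context"
    "fmt"
    "log"

    beacongrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

    // Two beacon node endpoints, comma-separated, enable failover.
    provider, err := beacongrpc.NewGrpcConnectionProvider(ctx, "localhost:4000,localhost:4001", dialOpts)
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = provider.Close() }()

    // The first CurrentConn call dials localhost:4000 lazily.
    conn := provider.CurrentConn()
    fmt.Println("primary:", provider.CurrentHost(), conn != nil)

    // If the primary becomes unusable, switch to the backup; the old connection
    // is closed and the next CurrentConn call dials localhost:4001 lazily.
    if err := provider.SetHost(1); err != nil {
        log.Fatal(err)
    }
    conn = provider.CurrentConn()
    fmt.Println("backup:", provider.CurrentHost(), conn != nil)
}
```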
api/grpc/grpc_connection_provider_test.go (new file, 203 lines)
@@ -0,0 +1,203 @@
package grpc

import (
    "context"
    "net"
    "strings"
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "localhost:4000", []string{"localhost:4000"}},
        {"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
        {"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
        _, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
        require.ErrorContains(t, "no gRPC endpoints provided", err)
    })
}

func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
    // Start only one server but configure provider with two endpoints
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    require.NoError(t, err)
    server := grpc.NewServer()
    go func() { _ = server.Serve(lis) }()
    defer server.Stop()

    validAddr := lis.Addr().String()
    invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening

    // Provider should succeed even though second endpoint is invalid (lazy connections)
    endpoint := validAddr + "," + invalidAddr
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err, "Provider creation should succeed with lazy connections")
    defer func() { _ = provider.Close() }()

    // First endpoint should work
    conn := provider.CurrentConn()
    assert.NotNil(t, conn, "First connection should be created lazily")
}

func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
    // Create provider with 3 endpoints
    var addrs []string
    var servers []*grpc.Server

    for range 3 {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        servers = append(servers, server)
    }
    defer func() {
        for _, s := range servers {
            s.Stop()
        }
    }()

    endpoint := strings.Join(addrs, ",")
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)
    defer func() { _ = provider.Close() }()

    // Access the internal state to verify single connection behavior
    p := provider.(*grpcConnectionProvider)

    // Initially no connection
    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
    p.mu.Unlock()

    // Access connection - should create one
    conn0 := provider.CurrentConn()
    assert.NotNil(t, conn0)

    p.mu.Lock()
    assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
    firstConn := p.conn
    p.mu.Unlock()

    // Call CurrentConn again - should return same connection
    conn0Again := provider.CurrentConn()
    assert.Equal(t, conn0, conn0Again, "Should return same connection")

    // Switch to different host - old connection should be closed, new one created lazily
    require.NoError(t, provider.SetHost(1))

    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SetHost (lazy)")
    p.mu.Unlock()

    // Get new connection
    conn1 := provider.CurrentConn()
    assert.NotNil(t, conn1)
    assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}

// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
    var addrs []string
    var cleanups []func()

    for range n {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        cleanups = append(cleanups, server.Stop)
    }

    endpoint := strings.Join(addrs, ",")

    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)

    cleanup := func() {
        _ = provider.Close()
        for _, c := range cleanups {
            c()
        }
    }
    return provider, addrs, cleanup
}

func TestGrpcConnectionProvider(t *testing.T) {
    provider, addrs, cleanup := testProvider(t, 3)
    defer cleanup()

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, addrs[1], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("SetHost circular", func(t *testing.T) {
        // Test round-robin style switching using SetHost with manual index
        indices := []int{1, 2, 0, 1} // Simulate circular switching
        for i, idx := range indices {
            require.NoError(t, provider.SetHost(idx))
            assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestGrpcConnectionProvider_Close(t *testing.T) {
    provider, _, cleanup := testProvider(t, 1)
    defer cleanup()

    assert.NotNil(t, provider.CurrentConn())
    require.NoError(t, provider.Close())
    assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
    require.NoError(t, provider.Close()) // Double close is safe
}
@@ -32,7 +32,7 @@ func LogRequests(
    )
    start := time.Now()
    err := invoker(ctx, method, req, reply, cc, opts...)
    logrus.WithField("backend", header["x-backend"]).
    log.WithField("backend", header["x-backend"]).
        WithField("method", method).WithField("duration", time.Since(start)).
        Debug("gRPC request finished.")
    return err
@@ -58,7 +58,7 @@ func LogStream(
        grpc.Header(&header),
    )
    strm, err := streamer(ctx, sd, conn, method, opts...)
    logrus.WithField("backend", header["x-backend"]).
    log.WithField("backend", header["x-backend"]).
        WithField("method", method).
        Debug("gRPC stream started.")
    return strm, err
@@ -71,7 +71,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
        if h != "" {
            keyValue := strings.Split(h, "=")
            if len(keyValue) < 2 {
                logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
                log.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
                continue
            }
            parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
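The grpcutils hunks above only swap `logrus` for the package-level `log`; `AppendHeaders` itself still turns `key=value` strings into outgoing gRPC metadata. Below is a hedged sketch of calling it; the flag value is illustrative, not a real Prysm default.

```go
// Illustrative call to AppendHeaders from api/grpc; the header values are made up.
package main

import (
    "context"
    "fmt"
    "strings"

    beacongrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc/metadata"
)

func main() {
    // e.g. a --grpc-headers style flag value (hypothetical): "Authorization=Bearer abc,X-Trace=1"
    flagValue := "Authorization=Bearer abc,X-Trace=1"

    // Malformed entries (no "=") are skipped with a warning; values containing "=" are preserved.
    ctx := beacongrpc.AppendHeaders(context.Background(), strings.Split(flagValue, ","))

    md, ok := metadata.FromOutgoingContext(ctx)
    fmt.Println(ok, md.Get("authorization"), md.Get("x-trace"))
}
```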
api/grpc/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package grpc

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/grpc")

api/grpc/mock_grpc_provider.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[0]
    }
    return ""
}
func (m *MockGrpcProvider) Hosts() []string   { return m.MockHosts }
func (m *MockGrpcProvider) SetHost(int) error { return nil }
func (m *MockGrpcProvider) Close() error      { return nil }
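Because MockGrpcProvider satisfies the full interface with plain fields, consumers can be unit tested without dialing anything. A hedged sketch follows; the function under test is hypothetical, and only the mock's fields from the diff are assumed.

```go
// Hypothetical test that injects MockGrpcProvider instead of a real connection provider.
package example_test

import (
    "testing"

    beacongrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
)

// pickHost stands in for any component that accepts a GrpcConnectionProvider.
func pickHost(p beacongrpc.GrpcConnectionProvider) string {
    return p.CurrentHost()
}

func TestPickHostUsesFirstConfiguredEndpoint(t *testing.T) {
    mock := &beacongrpc.MockGrpcProvider{
        MockHosts: []string{"primary:4000", "backup:4000"},
        // MockConn is left nil: no gRPC traffic is needed for this test.
    }
    if got := pickHost(mock); got != "primary:4000" {
        t.Fatalf("unexpected host: %s", got)
    }
    if len(mock.Hosts()) != 2 {
        t.Fatalf("expected 2 hosts, got %d", len(mock.Hosts()))
    }
}
```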
api/rest/BUILD.bazel (new file, 33 lines)
@@ -0,0 +1,33 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
api/rest/mock_rest_provider.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package rest

import "net/http"

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
    MockClient  *http.Client
    MockHandler RestHandler
    MockHosts   []string
    HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
    }
    return ""
}
func (m *MockRestProvider) Hosts() []string         { return m.MockHosts }
func (m *MockRestProvider) SetHost(index int) error { m.HostIndex = index; return nil }
func (m *MockRestProvider) NextHost()               { m.HostIndex = (m.HostIndex + 1) % len(m.MockHosts) }
api/rest/rest_connection_provider.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package rest

import (
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/client"
    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

var log = logrus.WithField("prefix", "rest")

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
    // RestHandler returns the REST handler for making API requests.
    RestHandler() RestHandler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
    Hosts() []string
    // SetHost switches to the endpoint at the given index.
    SetHost(index int) error
    // NextHost switches to the next endpoint in round-robin fashion.
    NextHost()
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.timeout = timeout
    }
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.headers = headers
    }
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.enableTracing = true
    }
}

type restConnectionProvider struct {
    endpoints     []string
    httpClient    *http.Client
    restHandler   RestHandler
    currentIndex  atomic.Uint64
    timeout       time.Duration
    headers       map[string][]string
    enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no REST API endpoints provided")
    }

    p := &restConnectionProvider{
        endpoints: endpoints,
    }

    for _, opt := range opts {
        opt(p)
    }

    // Build the HTTP transport chain
    var transport http.RoundTripper = http.DefaultTransport

    // Add custom headers if configured
    if len(p.headers) > 0 {
        transport = client.NewCustomHeadersTransport(transport, p.headers)
    }

    // Add tracing if enabled
    if p.enableTracing {
        transport = otelhttp.NewTransport(transport)
    }

    p.httpClient = &http.Client{
        Timeout:   p.timeout,
        Transport: transport,
    }

    // Create the REST handler with the HTTP client and initial host
    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized REST connection provider with endpoints")

    return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
    return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
    idx := p.currentIndex.Load() % uint64(len(p.endpoints))
    return p.endpoints[idx]
}

func (p *restConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *restConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[index])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx%uint64(len(p.endpoints))],
        "newHost":      p.endpoints[index],
    }).Debug("Trying REST endpoint")
    return nil
}

func (p *restConnectionProvider) NextHost() {
    oldIdx := p.currentIndex.Load()
    newIdx := (oldIdx + 1) % uint64(len(p.endpoints))
    p.currentIndex.Store(newIdx)

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[newIdx])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[newIdx],
    }).Debug("Switched to next REST endpoint")
}
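The REST provider composes its HTTP transport from functional options. A short sketch of wiring it up follows; the URLs and header value are illustrative, and only the constructor and options shown above are assumed.

```go
// Illustrative configuration of RestConnectionProvider with functional options.
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
    provider, err := rest.NewRestConnectionProvider(
        "http://beacon-1:3500,http://beacon-2:3500", // hypothetical endpoints
        rest.WithHttpTimeout(30*time.Second),
        rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
        rest.WithTracing(),
    )
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("starting against", provider.CurrentHost())

    // On a failed request the caller can rotate endpoints; the underlying
    // RestHandler is repointed at the new host.
    provider.NextHost()
    fmt.Println("failing over to", provider.CurrentHost())
}
```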
api/rest/rest_connection_provider_test.go (new file, 85 lines)
@@ -0,0 +1,85 @@
package rest

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
        {"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
        {"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewRestConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        _, err := NewRestConnectionProvider("")
        require.ErrorContains(t, "no REST API endpoints provided", err)
    })
}

func TestRestConnectionProvider(t *testing.T) {
    provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
    require.NoError(t, err)

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        assert.NotNil(t, provider.HttpClient())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, "http://host2:3500", provider.CurrentHost())
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("NextHost circular", func(t *testing.T) {
        require.NoError(t, provider.SetHost(0)) // Reset to start
        expected := []string{"http://host2:3500", "http://host3:3500", "http://host1:3500", "http://host2:3500"}
        for i, exp := range expected {
            provider.NextHost()
            assert.Equal(t, exp, provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestRestConnectionProvider_WithOptions(t *testing.T) {
    headers := map[string][]string{"Authorization": {"Bearer token"}}
    provider, err := NewRestConnectionProvider(
        "http://localhost:3500",
        WithHttpHeaders(headers),
        WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
        WithTracing(),
    )
    require.NoError(t, err)
    assert.NotNil(t, provider.HttpClient())
    assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}
@@ -1,4 +1,4 @@
package beacon_api
package rest

import (
    "bytes"
@@ -21,8 +21,10 @@ import (

type reqOption func(*http.Request)

// RestHandler defines the interface for making REST API requests.
type RestHandler interface {
    Get(ctx context.Context, endpoint string, resp any) error
    GetStatusCode(ctx context.Context, endpoint string) (int, error)
    GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
    Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
    PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
@@ -31,26 +33,31 @@ type RestHandler interface {
    SetHost(host string)
}

type BeaconApiRestHandler struct {
type restHandler struct {
    client       http.Client
    host         string
    reqOverrides []reqOption
}

// NewBeaconApiRestHandler returns a RestHandler
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
    brh := &BeaconApiRestHandler{
// newRestHandler returns a RestHandler (internal use)
func newRestHandler(client http.Client, host string) RestHandler {
    return NewRestHandler(client, host)
}

// NewRestHandler returns a RestHandler
func NewRestHandler(client http.Client, host string) RestHandler {
    rh := &restHandler{
        client: client,
        host:   host,
    }
    brh.appendAcceptOverride()
    return brh
    rh.appendAcceptOverride()
    return rh
}

// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
// bugs which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *BeaconApiRestHandler) appendAcceptOverride() {
func (c *restHandler) appendAcceptOverride() {
    if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
        c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
            req.Header.Set("Accept", accept)
@@ -59,18 +66,18 @@ func (c *BeaconApiRestHandler) appendAcceptOverride() {
}

// HttpClient returns the underlying HTTP client of the handler
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
func (c *restHandler) HttpClient() *http.Client {
    return &c.client
}

// Host returns the underlying HTTP host
func (c *BeaconApiRestHandler) Host() string {
func (c *restHandler) Host() string {
    return c.host
}

// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -90,7 +97,29 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
    return decodeResp(httpResp, resp)
}

func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
        return 0, errors.Wrapf(err, "failed to create request for endpoint %s", url)
    }
    req.Header.Set("User-Agent", version.BuildData())
    httpResp, err := c.client.Do(req)
    if err != nil {
        return 0, errors.Wrapf(err, "failed to perform request for endpoint %s", url)
    }
    defer func() {
        if err := httpResp.Body.Close(); err != nil {
            return
        }
    }()
    return httpResp.StatusCode, nil
}

func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -145,7 +174,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b

// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Post(
func (c *restHandler) Post(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -181,7 +210,7 @@ func (c *BeaconApiRestHandler) Post(
}

// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *BeaconApiRestHandler) PostSSZ(
func (c *restHandler) PostSSZ(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -282,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
    return nil
}

func (c *BeaconApiRestHandler) SetHost(host string) {
func (c *restHandler) SetHost(host string) {
    c.host = host
}
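GetStatusCode exists for status-only endpoints such as `/eth/v1/node/health`, where the HTTP code is the whole answer. Below is a hedged sketch of a readiness probe built on the handler; the host URL is illustrative, and only the RestHandler API from the diff is assumed.

```go
// Illustrative readiness probe using RestHandler.GetStatusCode.
package main

import (
    "context"
    "fmt"
    "net/http"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
    handler := rest.NewRestHandler(http.Client{}, "http://localhost:3500")

    code, err := handler.GetStatusCode(context.Background(), "/eth/v1/node/health")
    if err != nil {
        fmt.Println("beacon node unreachable:", err)
        return
    }
    switch code {
    case http.StatusOK: // 200: node is ready
        fmt.Println("ready")
    case http.StatusPartialContent: // 206: node is syncing
        fmt.Println("syncing")
    default: // e.g. 503: unavailable
        fmt.Println("unavailable, status", code)
    }
}
```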
@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package httprest

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "httprest")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/httprest")

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "middleware.go",
        "util.go",
    ],
@@ -27,6 +28,5 @@ go_test(
        "//api:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
9
api/server/middleware/log.go
Normal file
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package middleware

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/middleware")
@@ -9,7 +9,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/api"
	"github.com/OffchainLabs/prysm/v7/api/apiutil"
	"github.com/rs/cors"
	log "github.com/sirupsen/logrus"
)

type Middleware func(http.Handler) http.Handler

@@ -10,7 +10,6 @@ import (

	"github.com/OffchainLabs/prysm/v7/api"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	log "github.com/sirupsen/logrus"
)

// frozenHeaderRecorder allows asserting that response headers were not modified

@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "debounce.go",
        "every.go",
        "log.go",
        "multilock.go",
        "scatter.go",
    ],

@@ -6,8 +6,6 @@ import (
	"reflect"
	"runtime"
	"time"

	log "github.com/sirupsen/logrus"
)

// RunEvery runs the provided command periodically.

9
async/log.go
Normal file
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package async

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "async")
@@ -14,6 +14,7 @@ go_library(
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
"log_helpers.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
"options.go",
|
||||
@@ -48,7 +49,6 @@ go_library(
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
@@ -66,7 +66,6 @@ go_library(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
@@ -75,7 +74,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -148,8 +146,6 @@ go_test(
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
@@ -177,7 +173,6 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -13,7 +13,7 @@ go_library(
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844_v2//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -1,164 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() >= version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() >= version.Bellatrix {
|
||||
p, err := b.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
case err != nil:
|
||||
return err
|
||||
default:
|
||||
log = log.WithField("txCount", len(txs))
|
||||
txsPerSlotCount.Set(float64(len(txs)))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Deneb {
|
||||
kzgs, err := b.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// logs payload related data every slot.
|
||||
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil
|
||||
}
|
||||
payload, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.GasLimit() == 0 {
|
||||
return errors.New("gas limit should not be 0")
|
||||
}
|
||||
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
|
||||
fields := logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockNumber": payload.BlockNumber(),
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}
|
||||
if block.Version() >= version.Capella {
|
||||
withdrawals, err := payload.Withdrawals()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
fields["withdrawals"] = len(withdrawals)
|
||||
changes, err := block.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
if len(changes) > 0 {
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/blockchain")
|
||||
|
||||
162
beacon-chain/blockchain/log_helpers.go
Normal file
162
beacon-chain/blockchain/log_helpers.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() >= version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() >= version.Bellatrix {
|
||||
p, err := b.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
case err != nil:
|
||||
return err
|
||||
default:
|
||||
log = log.WithField("txCount", len(txs))
|
||||
txsPerSlotCount.Set(float64(len(txs)))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Deneb {
|
||||
kzgs, err := b.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// logs payload related data every slot.
|
||||
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil
|
||||
}
|
||||
payload, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.GasLimit() == 0 {
|
||||
return errors.New("gas limit should not be 0")
|
||||
}
|
||||
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
|
||||
fields := logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockNumber": payload.BlockNumber(),
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}
|
||||
if block.Version() >= version.Capella {
|
||||
withdrawals, err := payload.Withdrawals()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
fields["withdrawals"] = len(withdrawals)
|
||||
changes, err := block.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
if len(changes) > 0 {
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
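As a small worked example of the gasUtilized field computed in logPayload above, here is a hedged sketch of the same calculation. The gas figures are invented for the example, and the fmt import is assumed.

// Illustrative sketch: gas utilization is gas used divided by gas limit,
// rendered with two decimal places, mirroring the calculation in logPayload.
func gasUtilizedExample() string {
	var (
		gasUsed  uint64 = 12_500_000
		gasLimit uint64 = 30_000_000
	)
	utilized := float64(gasUsed) / float64(gasLimit) // ≈ 0.4167
	return fmt.Sprintf("%.2f", utilized)             // "0.42"
}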
|
||||
@@ -34,7 +34,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has attestation",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -42,7 +42,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has deposit",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has attester slashing",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -62,7 +62,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has proposer slashing",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -71,7 +71,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain proposerSlashings=1 slot=0",
|
||||
},
|
||||
{name: "has exit",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -80,7 +80,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has everything",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -93,11 +93,11 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 package=beacon-chain/blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has payload",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
|
||||
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain payloadHash=0x010203 slot=0 syncBitsCount=0 txCount=2",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -221,6 +221,19 @@ var (
			Buckets: []float64{1, 2, 4, 8, 16, 32},
		},
	)
	commitmentCount = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "commitment_count_max_21",
			Help:    "The number of blob KZG commitments per block.",
			Buckets: []float64{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21},
		},
	)
	maxBlobsPerBlock = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "max_blobs_per_block",
			Help: "The maximum number of blobs allowed in a block.",
		},
	)
)
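A brief hedged sketch of how the new histogram and gauge above are fed; in this diff the observation site is reportPostBlockProcessing, and the parameter names below are placeholders.

// Illustrative sketch only: after a block is processed, the number of blob KZG
// commitments is observed into the histogram and the current per-block blob
// limit is exported as a gauge.
func recordBlobMetricsExample(kzgCommitments [][]byte, blobLimit int) {
	commitmentCount.Observe(float64(len(kzgCommitments)))
	maxBlobsPerBlock.Set(float64(blobLimit))
}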

// reportSlotMetrics reports slot related metrics.

@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
@@ -138,14 +136,6 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithExecProofsPool to keep track of execution proofs.
|
||||
func WithExecProofsPool(p execproofs.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExecProofsPool = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Accessor) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -276,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.OperationNotifier = operationNotifier
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
@@ -17,7 +15,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -655,8 +652,6 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
//
|
||||
// EIP-8025: After Fulu, also checks for execution proofs availability.
|
||||
func (s *Service) isDataAvailable(
|
||||
ctx context.Context,
|
||||
roBlock consensusblocks.ROBlock,
|
||||
@@ -669,12 +664,7 @@ func (s *Service) isDataAvailable(
|
||||
root := roBlock.Root()
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// After checking data columns, check execution proofs availability.
|
||||
return s.areExecutionProofsAvailable(ctx, root)
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
@@ -908,70 +898,6 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
|
||||
}
|
||||
}
|
||||
|
||||
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
|
||||
// or an error or context cancellation occurs.
|
||||
// This check is only performed for lightweight verifier nodes that need zkVM proofs
|
||||
// to validate block execution (nodes without execution layer + proof generation capability).
|
||||
// A nil result means that the data availability check is successful.
|
||||
func (s *Service) areExecutionProofsAvailable(
|
||||
ctx context.Context,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) error {
|
||||
// Return early if zkVM features are disabled (no need to check for execution proofs),
|
||||
// or if the generation proof is enabled (we will generate proofs ourselves).
|
||||
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
requiredProofCount := params.BeaconConfig().MinProofsRequired
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"requiredProofCount": requiredProofCount,
|
||||
})
|
||||
|
||||
// Subscribe to execution proof received events.
|
||||
eventsChan := make(chan *feed.Event, 1)
|
||||
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Return early if we already have enough proofs.
|
||||
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
|
||||
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Some proofs are missing; wait for them.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case event := <-eventsChan:
|
||||
if event.Type != operation.ExecutionProofReceived {
|
||||
continue
|
||||
}
|
||||
|
||||
proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
|
||||
if !ok {
|
||||
log.Error("Could not cast event data to ExecutionProofReceivedData")
|
||||
continue
|
||||
}
|
||||
|
||||
proof := proofWrapper.ExecutionProof
|
||||
|
||||
// Skip if the proof is for a different block.
|
||||
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
// Return if we have enough proofs.
|
||||
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
|
||||
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
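The removed areExecutionProofsAvailable above follows a check-then-wait pattern: consult the pool first, then block on feed events until the threshold is met or the context ends. A stripped-down, hedged sketch of that pattern with plain channels; countFn and events are stand-ins for ExecProofsPool.Count and the operation feed subscription, and the context import is assumed.

// Illustrative sketch only: a generic wait-for-threshold loop.
func waitForThreshold(ctx context.Context, required uint64, countFn func() uint64, events <-chan struct{}) error {
	if countFn() >= required {
		return nil // already satisfied before waiting
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-events:
			if countFn() >= required {
				return nil
			}
		}
	}
}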
|
||||
|
||||
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
// related to late blocks. It emits a MissedSlot state feed event.
|
||||
// It calls FCU and sets the right attributes if we are proposing next slot
|
||||
|
||||
@@ -13,8 +13,6 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
@@ -3003,113 +3001,6 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Insert MinProofsRequired execution proofs into the pool
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
}
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{
|
||||
WithOperationNotifier(&mock.MockOperationNotifier{}),
|
||||
},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Goroutine to send execution proofs after data columns are available
|
||||
go func() {
|
||||
// Wait a bit to simulate async proof arrival
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Send ExecutionProofReceived events
|
||||
opfeed := service.cfg.OperationNotifier.OperationFeed()
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
|
||||
opfeed.Send(&feed.Event{
|
||||
Type: operation.ExecutionProofReceived,
|
||||
Data: &operation.ExecutionProofReceivedData{
|
||||
ExecutionProof: proof,
|
||||
},
|
||||
})
|
||||
}
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Test_postBlockProcess_EventSending tests that block processed events are only sent
|
||||
|
||||
@@ -66,54 +66,53 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
|
||||
|
||||
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
go func() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
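The rewrite above wraps the whole routine in a goroutine so the caller no longer blocks on WaitForClock. A minimal hedged sketch of the same non-blocking shape, with a plain time.Ticker standing in for the slot ticker; waitReady and onTick are hypothetical stand-ins, and the 12-second interval is only an example value.

// Illustrative sketch only: a blocking wait plus a periodic loop moved inside
// a goroutine, so the caller returns immediately.
func startRoutineExample(ctx context.Context, waitReady func(context.Context) error, onTick func()) {
	go func() {
		if err := waitReady(ctx); err != nil {
			return // give up if the clock/genesis data never arrives
		}
		ticker := time.NewTicker(12 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				onTick()
			}
		}
	}()
}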
|
||||
|
||||
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/slasher/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -258,32 +259,55 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, blo
|
||||
}
|
||||
|
||||
func (s *Service) reportPostBlockProcessing(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
signedBlock interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
receivedTime time.Time,
|
||||
daWaitedTime time.Duration,
|
||||
) {
|
||||
block := signedBlock.Block()
|
||||
if block == nil {
|
||||
log.WithField("blockRoot", blockRoot).Error("Nil block")
|
||||
return
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
finalized := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
reportSlotMetrics(block.Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
|
||||
// Log block sync status.
|
||||
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
justified := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
|
||||
if err := logBlockSyncStatus(block, blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
}
|
||||
|
||||
// Log payload data
|
||||
if err := logPayload(block.Block()); err != nil {
|
||||
if err := logPayload(block); err != nil {
|
||||
log.WithError(err).Error("Unable to log debug block payload data")
|
||||
}
|
||||
|
||||
// Log state transition data.
|
||||
if err := logStateTransitionData(block.Block()); err != nil {
|
||||
if err := logStateTransitionData(block); err != nil {
|
||||
log.WithError(err).Error("Unable to log state transition data")
|
||||
}
|
||||
|
||||
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
|
||||
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
log.WithField("blockRoot", blockRoot).Error("Nil block body")
|
||||
return
|
||||
}
|
||||
|
||||
commitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Unable to get blob KZG commitments")
|
||||
}
|
||||
|
||||
commitmentCount.Observe(float64(len(commitments)))
|
||||
maxBlobsPerBlock.Set(float64(params.BeaconConfig().MaxBlobsPerBlock(block.Slot())))
|
||||
}
|
||||
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
|
||||
@@ -130,12 +131,10 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
// Hacky sleep, should use a better way to be able to resolve the race
|
||||
// between event being sent out and processed.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -222,10 +221,10 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
|
||||
})
|
||||
wg.Wait()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
// Verify fork choice has processed the block. (Genesis block and the new block)
|
||||
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
|
||||
}
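These test changes replace fixed sleeps with polling assertions. A hedged sketch of the pattern, reusing the require.Eventually call shape that appears in the diff; the events accessor is a placeholder, and the testing, time, and require imports are assumed.

// Illustrative sketch: poll a condition instead of sleeping a fixed duration.
func assertEventSeen(t *testing.T, events func() int) {
	require.Eventually(t, func() bool {
		return events() >= 1
	}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
}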
|
||||
@@ -265,10 +264,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) >= 1
|
||||
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -512,8 +511,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) == 1
|
||||
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
@@ -552,8 +552,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
require.Eventually(t, func() bool {
|
||||
return len(notifier.ReceivedEvents()) == 1
|
||||
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
@@ -596,13 +597,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {
|
||||
|
||||
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
|
||||
|
||||
// wait for the goroutine to finish processing
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Check that the light client bootstrap is saved
|
||||
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
// Wait for the light client bootstrap to be saved (runs in goroutine)
|
||||
var b interfaces.LightClientBootstrap
|
||||
require.Eventually(t, func() bool {
|
||||
var err error
|
||||
b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
return err == nil && b != nil
|
||||
}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")
|
||||
|
||||
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -12,10 +12,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
@@ -25,14 +23,12 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -87,11 +83,9 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
ExecProofsPool execproofs.PoolManager
|
||||
P2P p2p.Accessor
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
AttService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
@@ -215,9 +209,7 @@ func (s *Service) Start() {
|
||||
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
go s.spawnProcessAttestationsRoutine()
|
||||
go s.spawnFinalizedProofsPruningRoutine()
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
}
|
||||
|
||||
@@ -297,19 +289,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -474,73 +453,6 @@ func (s *Service) removeStartupState() {
|
||||
s.cfg.FinalizedStateAtStartUp = nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfoInDB updates the custody information in the database.
|
||||
// It returns the (potentially updated) custody group count and the earliest available slot.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSupernode := flags.Get().Supernode
|
||||
isSemiSupernode := flags.Get().SemiSupernode
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
custodyRequirement := cfg.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
|
||||
}
|
||||
|
||||
// Compute the target custody group count based on current flag configuration.
|
||||
targetCustodyGroupCount := custodyRequirement
|
||||
|
||||
// Supernode: custody all groups (either currently set or previously enabled)
|
||||
if isSupernode {
|
||||
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
|
||||
if isSemiSupernode {
|
||||
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "minimum custody group count")
|
||||
}
|
||||
|
||||
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
|
||||
if slot < fuluForkSlot {
|
||||
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "earliest slot")
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
if isSupernode {
|
||||
log.WithFields(logrus.Fields{
|
||||
"current": actualCustodyGroupCount,
|
||||
"target": cfg.NumberOfCustodyGroups,
|
||||
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
|
||||
}
|
||||
|
||||
if wasSupernode && !isSupernode {
|
||||
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, actualCustodyGroupCount, nil
|
||||
}
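Distilling the branchy middle of the removed updateCustodyInfoInDB above, here is a hedged sketch of just the target custody-group-count selection; the helper name and parameters are illustrative.

// Illustrative sketch only: how the target custody group count is chosen.
// A supernode custodies every group; a semi-supernode custodies at least the
// minimum needed to reconstruct, but never less than the base requirement.
func targetCustodyGroupCount(custodyRequirement, numberOfCustodyGroups, reconstructMinimum uint64, supernode, semiSupernode bool) uint64 {
	switch {
	case supernode:
		return numberOfCustodyGroups
	case semiSupernode:
		return max(custodyRequirement, reconstructMinimum)
	default:
		return custodyRequirement
	}
}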
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
@@ -557,62 +469,3 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
fuluForkEpoch := cfg.FuluForkEpoch
|
||||
if fuluForkEpoch == cfg.FarFutureEpoch {
|
||||
return cfg.FarFutureSlot, nil
|
||||
}
|
||||
|
||||
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "epoch start")
|
||||
}
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
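As a worked example of what fuluForkSlot computes: slots.EpochStart resolves to the first slot of the epoch, that is epoch multiplied by slots per epoch. A hedged sketch; the 32-slot epoch used in the comment is an assumption for the example, not taken from this diff.

// Illustrative sketch: the first slot of an epoch is epoch * slotsPerEpoch.
// For example, with 32 slots per epoch, fork epoch 10 starts at slot 320.
func epochStartExample(epoch, slotsPerEpoch uint64) uint64 {
	return epoch * slotsPerEpoch
}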
|
||||
|
||||
// spawnFinalizedProofsPruningRoutine prunes execution proofs pool on every epoch.
|
||||
// It removes proofs older than the finalized checkpoint to prevent unbounded
|
||||
// memory growth.
|
||||
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
|
||||
func (s *Service) spawnFinalizedProofsPruningRoutine() {
|
||||
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
defer ticker.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case slot := <-ticker.C():
|
||||
// Only prune at the start of each epoch
|
||||
if !slots.IsEpochStart(slot) {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedCheckpoint := s.FinalizedCheckpt()
|
||||
if finalizedCheckpoint == nil {
|
||||
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get finalized slot")
|
||||
continue
|
||||
}
|
||||
|
||||
// Prune proofs older than finalized slot
|
||||
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"prunedCount": count,
|
||||
"finalizedSlot": finalizedSlot,
|
||||
}).Debug("Pruned finalized execution proofs")
|
||||
}
|
||||
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,11 +23,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -596,218 +594,3 @@ func TestNotifyIndex(t *testing.T) {
		t.Errorf("Notifier channel did not receive the index")
	}
}

func TestUpdateCustodyInfoInDB(t *testing.T) {
	const (
		fuluForkEpoch         = 10
		custodyRequirement    = uint64(4)
		earliestStoredSlot    = primitives.Slot(12)
		numberOfCustodyGroups = uint64(64)
	)

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.FuluForkEpoch = fuluForkEpoch
	cfg.CustodyRequirement = custodyRequirement
	cfg.NumberOfCustodyGroups = numberOfCustodyGroups
	params.OverrideBeaconConfig(cfg)

	ctx := t.Context()
	pbBlock := util.NewBeaconBlock()
	pbBlock.Block.Slot = 12
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
	require.NoError(t, err)

	roBlock, err := blocks.NewROBlock(signedBeaconBlock)
	require.NoError(t, err)

	t.Run("CGC increases before fulu", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Before Fulu
		// -----------
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		// After Fulu
		// ----------
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)
	})

	t.Run("CGC increases after fulu", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Before Fulu
		// -----------
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		// After Fulu
		// ----------
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)
	})

	t.Run("Supernode downgrade prevented", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		// Try to downgrade by removing flag
		gFlags.Supernode = false
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Should still be supernode
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
	})

	t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the custody groups (32 here)
		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the custody groups

		// Try to downgrade by removing flag
		gFlags.SemiSupernode = false
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// UpdateCustodyInfo should prevent downgrade - custody count should remain at the semi-supernode value
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, semiSupernodeCustody, actualCgc) // Unchanged due to downgrade prevention by UpdateCustodyInfo
	})

	t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Start with semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the custody groups (32 here)
		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the custody groups

		// Upgrade to full supernode
		gFlags.SemiSupernode = false
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Should upgrade to full supernode
		upgradeSlot := slot + 2
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
		require.NoError(t, err)
		require.Equal(t, upgradeSlot, actualEas)           // Earliest slot updates when upgrading
		require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to the full custody group count
	})

	t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Mock a high custody requirement (simulating many validators)
		// We need to override the custody requirement calculation
		// For this test, we'll verify the logic by checking if custodyRequirement > 64
		// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
		// This would require a different test setup with actual validators
		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // half of the custody groups (32 here)
		// With low validator requirements (4), should use the semi-supernode minimum (half the groups)
		require.Equal(t, semiSupernodeCustody, actualCgc)
	})
}

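The behaviour these cases exercise boils down to picking a custody group count and never letting it shrink. A minimal sketch of that selection, under the assumption that the real updateCustodyInfoInDB follows roughly this rule; the helper below is hypothetical and only mirrors what the assertions expect:

// targetCustodyGroupCount mirrors the test's expectations: supernodes custody all
// groups, semi-supernodes at least half, everyone else the validator-derived
// requirement, and a previously stored value is never reduced.
func targetCustodyGroupCount(stored, validatorRequirement, totalGroups uint64, supernode, semiSupernode bool) uint64 {
	target := validatorRequirement
	if semiSupernode && totalGroups/2 > target {
		target = totalGroups / 2
	}
	if supernode {
		target = totalGroups
	}
	if stored > target {
		// Downgrade prevention: keep the larger value already persisted.
		target = stored
	}
	return target
}
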
@@ -3,7 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    testonly = True,
    srcs = ["mock.go"],
    srcs = [
        "log.go",
        "mock.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing",
    visibility = [
        "//beacon-chain:__subpackages__",

beacon-chain/blockchain/testing/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package testing

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/blockchain/testing")

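The generated comment refers to a prefix-replacement table in the logrus prefixed formatter; that file is not part of this diff, so the snippet below is only a rough sketch of how a formatter could turn the "package" field into the text after the last slash. The package name, function, and replacements map are assumptions for illustration.

package logging

import (
	"strings"

	"github.com/sirupsen/logrus"
)

// packagePrefix returns the display prefix for an entry: an explicit replacement
// if one is configured, otherwise the text after the last slash of the
// "package" field.
func packagePrefix(entry *logrus.Entry, replacements map[string]string) string {
	pkg, ok := entry.Data["package"].(string)
	if !ok {
		return ""
	}
	if name, ok := replacements[pkg]; ok {
		return name
	}
	return pkg[strings.LastIndex(pkg, "/")+1:]
}
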
@@ -30,7 +30,6 @@ import (
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var ErrNilState = errors.New("nil state")
@@ -267,7 +266,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
		if err := s.DB.SaveBlock(ctx, block); err != nil {
			return err
		}
		logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
		log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
	}
	s.Root = signingRoot[:]
	s.Block = block
@@ -296,7 +295,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
		if err := s.DB.SaveBlock(ctx, b); err != nil {
			return err
		}
		logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
		log.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
	}
	s.Root = signingRoot[:]
	s.Block = b
@@ -328,7 +327,7 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
		if err := s.DB.SaveBlock(ctx, block); err != nil {
			return err
		}
		logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
		log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
	}
	s.Root = signingRoot[:]
	s.Block = block
@@ -585,11 +584,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
	ojc := &ethpb.Checkpoint{}
	st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
	if err != nil {
		logrus.WithError(err).Error("Could not update head")
		log.WithError(err).Error("Could not update head")
	}
	err = s.ForkChoiceStore.InsertNode(ctx, st, root)
	if err != nil {
		logrus.WithError(err).Error("Could not insert node to forkchoice")
		log.WithError(err).Error("Could not insert node to forkchoice")
	}
}

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package builder

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "builder")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/builder")

beacon-chain/cache/BUILD.bazel (vendored, 1 line changed)
@@ -16,6 +16,7 @@ go_library(
        "doc.go",
        "error.go",
        "interfaces.go",
        "log.go",
        "payload_id.go",
        "proposer_indices.go",
        "proposer_indices_disabled.go",  # keep

beacon-chain/cache/attestation.go (vendored, 1 line changed)
@@ -9,7 +9,6 @@ import (
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

type attGroup struct {

beacon-chain/cache/committee.go (vendored, 1 line changed)
@@ -17,7 +17,6 @@ import (
	lru "github.com/hashicorp/golang-lru"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	log "github.com/sirupsen/logrus"
)

const (

@@ -8,6 +8,7 @@ go_library(
        "deposit_pruner.go",
        "deposit_tree.go",
        "deposit_tree_snapshot.go",
        "log.go",
        "merkle_tree.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/cache/depositsnapshot",

@@ -20,7 +20,6 @@ var (
		Name: "beacondb_all_deposits_eip4881",
		Help: "The number of total deposits in memory",
	})
	log = logrus.WithField("prefix", "cache")
)

// InsertDeposit into the database. If deposit or block number are nil

beacon-chain/cache/depositsnapshot/log.go (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package depositsnapshot

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/cache/depositsnapshot")

beacon-chain/cache/log.go (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package cache

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/cache")

beacon-chain/cache/sync_committee.go (vendored, 1 line changed)
@@ -11,7 +11,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	log "github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/cache"
)

beacon-chain/cache/tracked_validators.go (vendored, 5 lines changed)
@@ -9,7 +9,6 @@ import (
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sirupsen/logrus"
)

const (
@@ -67,7 +66,7 @@ func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (Tra

	val, ok := item.(TrackedValidator)
	if !ok {
		logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
		log.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
		return TrackedValidator{}, false
	}

@@ -113,7 +112,7 @@ func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
	for cacheKey := range items {
		index, err := fromCacheKey(cacheKey)
		if err != nil {
			logrus.WithError(err).Error("Failed to get validator index from cache key")
			log.WithError(err).Error("Failed to get validator index from cache key")
			continue
		}

@@ -8,6 +8,7 @@ go_library(
        "deposit.go",
        "epoch_precompute.go",
        "epoch_spec.go",
        "log.go",
        "reward.go",
        "sync_committee.go",
        "transition.go",

@@ -7,7 +7,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	log "github.com/sirupsen/logrus"
)

// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.

beacon-chain/core/altair/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package altair

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/altair")

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package blocks

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "blocks")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/blocks")

@@ -9,9 +9,9 @@ go_library(
        "deposits.go",
        "effective_balance_updates.go",
        "error.go",
        "log.go",
        "registry_updates.go",
        "transition.go",
        "transition_no_verify_sig.go",
        "upgrade.go",
        "validator.go",
        "withdrawals.go",
@@ -61,7 +61,6 @@ go_test(
        "error_test.go",
        "export_test.go",
        "registry_updates_test.go",
        "transition_no_verify_sig_test.go",
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",

@@ -17,7 +17,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// ProcessPendingConsolidations implements the spec definition below. This method makes mutating

@@ -18,7 +18,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// ProcessDeposits is one of the operations performed on each processed

@@ -6,6 +6,11 @@ type execReqErr struct {
	error
}

// NewExecReqError creates a new execReqErr.
func NewExecReqError(msg string) error {
	return execReqErr{errors.New(msg)}
}

// IsExecutionRequestError returns true if the error has `execReqErr`.
func IsExecutionRequestError(e error) bool {
	if e == nil {

beacon-chain/core/electra/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package electra

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/electra")

@@ -1,60 +0,0 @@
package electra_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessOperationsWithNilRequests(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
		errMsg    string
	}{
		{
			name: "Nil deposit request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
			},
			errMsg: "nil deposit request",
		},
		{
			name: "Nil withdrawal request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
			},
			errMsg: "nil withdrawal request",
		},
		{
			name: "Nil consolidation request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
			},
			errMsg: "nil consolidation request",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(1))

			_, err = electra.ProcessOperations(t.Context(), st, b.Block())
			require.ErrorContains(t, tc.errMsg, err)
		})
	}
}

@@ -17,7 +17,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload

@@ -46,9 +46,6 @@ const (

	// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
	DataColumnReceived = 12

	// ExecutionProofReceived is sent after an execution proof object has been received from gossip or rpc.
	ExecutionProofReceived = 13
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
	Change *ethpb.SignedBLSToExecutionChange
}

// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
	ExecutionProof *ethpb.ExecutionProof
}

// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
	Blob *blocks.VerifiedROBlob

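ExecutionProofReceived and its data struct follow the same pattern as the other operation events: a subscriber switches on the event type and casts the payload. As a generic illustration only (the event struct below is a stand-in, not Prysm's actual feed API):

// Illustration only: a simplified event shape standing in for the real feed type.
type event struct {
	Type int
	Data interface{}
}

// handle shows how a consumer of ExecutionProofReceived events would recover the payload.
func handle(ev event) {
	if ev.Type != ExecutionProofReceived {
		return
	}
	data, ok := ev.Data.(*ExecutionProofReceivedData)
	if !ok {
		return
	}
	_ = data.ExecutionProof // hand the proof to the processing pipeline
}
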
beacon-chain/core/gloas/BUILD.bazel (new file, 45 lines)
@@ -0,0 +1,45 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["bid.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["bid_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/validator-client:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
beacon-chain/core/gloas/bid.go (new file, 193 lines)
@@ -0,0 +1,193 @@
package gloas

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
//	signed_bid = block.body.signed_execution_payload_bid
//	bid = signed_bid.message
//	builder_index = bid.builder_index
//	amount = bid.value
//	if builder_index == BUILDER_INDEX_SELF_BUILD:
//	    assert amount == 0
//	    assert signed_bid.signature == G2_POINT_AT_INFINITY
//	else:
//	    assert is_active_builder(state, builder_index)
//	    assert can_builder_cover_bid(state, builder_index, amount)
//	    assert verify_execution_payload_bid_signature(state, signed_bid)
//	assert bid.slot == block.slot
//	assert bid.parent_block_hash == state.latest_block_hash
//	assert bid.parent_block_root == block.parent_root
//	assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//	if amount > 0:
//	    state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
//	state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
	signedBid, err := block.Body().SignedExecutionPayloadBid()
	if err != nil {
		return errors.Wrap(err, "failed to get signed execution payload bid")
	}

	wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
	if err != nil {
		return errors.Wrap(err, "failed to wrap signed bid")
	}

	bid, err := wrappedBid.Bid()
	if err != nil {
		return errors.Wrap(err, "failed to get bid from wrapped bid")
	}

	builderIndex := bid.BuilderIndex()
	amount := bid.Value()

	if builderIndex == params.BeaconConfig().BuilderIndexSelfBuild {
		if amount != 0 {
			return fmt.Errorf("self-build amount must be zero, got %d", amount)
		}
		if wrappedBid.Signature() != common.InfiniteSignature {
			return errors.New("self-build signature must be point at infinity")
		}
	} else {
		ok, err := st.IsActiveBuilder(builderIndex)
		if err != nil {
			return errors.Wrap(err, "builder active check failed")
		}
		if !ok {
			return fmt.Errorf("builder %d is not active", builderIndex)
		}

		ok, err = st.CanBuilderCoverBid(builderIndex, amount)
		if err != nil {
			return errors.Wrap(err, "builder balance check failed")
		}
		if !ok {
			return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
		}

		if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
			return errors.Wrap(err, "bid signature validation failed")
		}
	}

	if err := validateBidConsistency(st, bid, block); err != nil {
		return errors.Wrap(err, "bid consistency validation failed")
	}

	if amount > 0 {
		feeRecipient := bid.FeeRecipient()
		pendingPayment := &ethpb.BuilderPendingPayment{
			Weight: 0,
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: feeRecipient[:],
				Amount:       amount,
				BuilderIndex: builderIndex,
			},
		}
		slotIndex := params.BeaconConfig().SlotsPerEpoch + (bid.Slot() % params.BeaconConfig().SlotsPerEpoch)
		if err := st.SetBuilderPendingPayment(slotIndex, pendingPayment); err != nil {
			return errors.Wrap(err, "failed to set pending payment")
		}
	}

	if err := st.SetExecutionPayloadBid(bid); err != nil {
		return errors.Wrap(err, "failed to cache execution payload bid")
	}

	return nil
}

// validateBidConsistency checks that the bid is consistent with the current beacon state.
func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayloadBid, block interfaces.ReadOnlyBeaconBlock) error {
	if bid.Slot() != block.Slot() {
		return fmt.Errorf("bid slot %d does not match block slot %d", bid.Slot(), block.Slot())
	}

	latestBlockHash, err := st.LatestBlockHash()
	if err != nil {
		return errors.Wrap(err, "failed to get latest block hash")
	}
	if bid.ParentBlockHash() != latestBlockHash {
		return fmt.Errorf("bid parent block hash mismatch: got %x, expected %x",
			bid.ParentBlockHash(), latestBlockHash)
	}

	if bid.ParentBlockRoot() != block.ParentRoot() {
		return fmt.Errorf("bid parent block root mismatch: got %x, expected %x",
			bid.ParentBlockRoot(), block.ParentRoot())
	}

	randaoMix, err := helpers.RandaoMix(st, slots.ToEpoch(st.Slot()))
	if err != nil {
		return errors.Wrap(err, "failed to get randao mix")
	}
	if bid.PrevRandao() != [32]byte(randaoMix) {
		return fmt.Errorf("bid prev randao mismatch: got %x, expected %x", bid.PrevRandao(), randaoMix)
	}

	return nil
}

// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// It validates that the signature was created by the builder specified in the bid
// using the appropriate domain for the beacon builder.
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
	bid, err := signedBid.Bid()
	if err != nil {
		return errors.Wrap(err, "failed to get bid")
	}

	pubkey, err := st.BuilderPubkey(bid.BuilderIndex())
	if err != nil {
		return errors.Wrap(err, "failed to get builder pubkey")
	}

	publicKey, err := bls.PublicKeyFromBytes(pubkey[:])
	if err != nil {
		return errors.Wrap(err, "invalid builder public key")
	}

	signatureBytes := signedBid.Signature()
	signature, err := bls.SignatureFromBytes(signatureBytes[:])
	if err != nil {
		return errors.Wrap(err, "invalid signature format")
	}

	currentEpoch := slots.ToEpoch(bid.Slot())
	domain, err := signing.Domain(
		st.Fork(),
		currentEpoch,
		params.BeaconConfig().DomainBeaconBuilder,
		st.GenesisValidatorsRoot(),
	)
	if err != nil {
		return errors.Wrap(err, "failed to compute signing domain")
	}

	signingRoot, err := signedBid.SigningRoot(domain)
	if err != nil {
		return errors.Wrap(err, "failed to compute signing root")
	}

	if !signature.Verify(publicKey, signingRoot[:]) {
		return signing.ErrSigFailedToVerify
	}

	return nil
}

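One detail worth calling out in ProcessExecutionPayloadBid above: the pending payment is written into the second half of the two-epoch builder_pending_payments buffer. A quick worked example of that index, assuming mainnet's 32 slots per epoch purely for illustration:

package main

import "fmt"

func main() {
	const slotsPerEpoch = uint64(32) // mainnet value, assumed here for illustration
	bidSlot := uint64(75)

	// Matches slotIndex in ProcessExecutionPayloadBid: current-epoch payments
	// occupy indices [slotsPerEpoch, 2*slotsPerEpoch) of the buffer.
	slotIndex := slotsPerEpoch + (bidSlot % slotsPerEpoch)
	fmt.Println(slotIndex) // 32 + (75 % 32) = 32 + 11 = 43
}
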
beacon-chain/core/gloas/bid_test.go (new file, 633 lines)
@@ -0,0 +1,633 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
validatorpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/validator-client"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type stubBlockBody struct {
|
||||
signedBid *ethpb.SignedExecutionPayloadBid
|
||||
}
|
||||
|
||||
func (s stubBlockBody) Version() int { return version.Gloas }
|
||||
func (s stubBlockBody) RandaoReveal() [96]byte { return [96]byte{} }
|
||||
func (s stubBlockBody) Eth1Data() *ethpb.Eth1Data { return nil }
|
||||
func (s stubBlockBody) Graffiti() [32]byte { return [32]byte{} }
|
||||
func (s stubBlockBody) ProposerSlashings() []*ethpb.ProposerSlashing { return nil }
|
||||
func (s stubBlockBody) AttesterSlashings() []ethpb.AttSlashing { return nil }
|
||||
func (s stubBlockBody) Attestations() []ethpb.Att { return nil }
|
||||
func (s stubBlockBody) Deposits() []*ethpb.Deposit { return nil }
|
||||
func (s stubBlockBody) VoluntaryExits() []*ethpb.SignedVoluntaryExit { return nil }
|
||||
func (s stubBlockBody) SyncAggregate() (*ethpb.SyncAggregate, error) { return nil, nil }
|
||||
func (s stubBlockBody) IsNil() bool { return s.signedBid == nil }
|
||||
func (s stubBlockBody) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
|
||||
func (s stubBlockBody) Proto() (proto.Message, error) { return nil, nil }
|
||||
func (s stubBlockBody) Execution() (interfaces.ExecutionData, error) { return nil, nil }
|
||||
func (s stubBlockBody) BLSToExecutionChanges() ([]*ethpb.SignedBLSToExecutionChange, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s stubBlockBody) BlobKzgCommitments() ([][]byte, error) { return nil, nil }
|
||||
func (s stubBlockBody) ExecutionRequests() (*enginev1.ExecutionRequests, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s stubBlockBody) PayloadAttestations() ([]*ethpb.PayloadAttestation, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s stubBlockBody) SignedExecutionPayloadBid() (*ethpb.SignedExecutionPayloadBid, error) {
|
||||
return s.signedBid, nil
|
||||
}
|
||||
func (s stubBlockBody) MarshalSSZ() ([]byte, error) { return nil, nil }
|
||||
func (s stubBlockBody) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
|
||||
func (s stubBlockBody) UnmarshalSSZ([]byte) error { return nil }
|
||||
func (s stubBlockBody) SizeSSZ() int { return 0 }
|
||||
|
||||
type stubBlock struct {
|
||||
slot primitives.Slot
|
||||
proposer primitives.ValidatorIndex
|
||||
parentRoot [32]byte
|
||||
body stubBlockBody
|
||||
v int
|
||||
}
|
||||
|
||||
var (
|
||||
_ interfaces.ReadOnlyBeaconBlockBody = (*stubBlockBody)(nil)
|
||||
_ interfaces.ReadOnlyBeaconBlock = (*stubBlock)(nil)
|
||||
)
|
||||
|
||||
func (s stubBlock) Slot() primitives.Slot { return s.slot }
|
||||
func (s stubBlock) ProposerIndex() primitives.ValidatorIndex { return s.proposer }
|
||||
func (s stubBlock) ParentRoot() [32]byte { return s.parentRoot }
|
||||
func (s stubBlock) StateRoot() [32]byte { return [32]byte{} }
|
||||
func (s stubBlock) Body() interfaces.ReadOnlyBeaconBlockBody { return s.body }
|
||||
func (s stubBlock) IsNil() bool { return false }
|
||||
func (s stubBlock) IsBlinded() bool { return false }
|
||||
func (s stubBlock) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
|
||||
func (s stubBlock) Proto() (proto.Message, error) { return nil, nil }
|
||||
func (s stubBlock) MarshalSSZ() ([]byte, error) { return nil, nil }
|
||||
func (s stubBlock) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
|
||||
func (s stubBlock) UnmarshalSSZ([]byte) error { return nil }
|
||||
func (s stubBlock) SizeSSZ() int { return 0 }
|
||||
func (s stubBlock) Version() int { return s.v }
|
||||
func (s stubBlock) AsSignRequestObject() (validatorpb.SignRequestObject, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s stubBlock) HashTreeRootWith(*fastssz.Hasher) error { return nil }
|
||||
|
||||
func buildGloasState(t *testing.T, slot primitives.Slot, proposerIdx primitives.ValidatorIndex, builderIdx primitives.BuilderIndex, balance uint64, randao [32]byte, latestHash [32]byte, builderPubkey [48]byte) *state_native.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = bytes.Repeat([]byte{0xAA}, 32)
|
||||
stateRoots[i] = bytes.Repeat([]byte{0xBB}, 32)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = randao[:]
|
||||
}
|
||||
|
||||
withdrawalCreds := make([]byte, 32)
|
||||
withdrawalCreds[0] = cfg.BuilderWithdrawalPrefixByte
|
||||
|
||||
validatorCount := int(proposerIdx) + 1
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := range validatorCount {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: builderPubkey[:],
|
||||
WithdrawalCredentials: withdrawalCreds,
|
||||
EffectiveBalance: balance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 0,
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: cfg.FarFutureEpoch,
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
}
|
||||
balances[i] = balance
|
||||
}
|
||||
|
||||
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
|
||||
for i := range payments {
|
||||
payments[i] = ðpb.BuilderPendingPayment{Withdrawal: ðpb.BuilderPendingWithdrawal{}}
|
||||
}
|
||||
|
||||
var builders []*ethpb.Builder
|
||||
if builderIdx != params.BeaconConfig().BuilderIndexSelfBuild {
|
||||
builderCount := int(builderIdx) + 1
|
||||
builders = make([]*ethpb.Builder, builderCount)
|
||||
builders[builderCount-1] = ðpb.Builder{
|
||||
Pubkey: builderPubkey[:],
|
||||
Version: []byte{0},
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x01}, 20),
|
||||
Balance: primitives.Gwei(balance),
|
||||
DepositEpoch: 0,
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, 32),
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x22}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x22}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
LatestBlockHash: latestHash[:],
|
||||
BuilderPendingPayments: payments,
|
||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
Builders: builders,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return st.(*state_native.BeaconState)
|
||||
}
|
||||
|
||||
func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid, fork *ethpb.Fork, genesisRoot [32]byte) [96]byte {
|
||||
t.Helper()
|
||||
epoch := slots.ToEpoch(primitives.Slot(bid.Slot))
|
||||
domain, err := signing.Domain(fork, epoch, params.BeaconConfig().DomainBeaconBuilder, genesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(bid, domain)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(root[:]).Marshal()
|
||||
var out [96]byte
|
||||
copy(out[:], sig)
|
||||
return out
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
|
||||
slot := primitives.Slot(12)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
|
||||
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
|
||||
pubKey := [48]byte{}
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
}
|
||||
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
require.NoError(t, ProcessExecutionPayloadBid(state, block))
|
||||
|
||||
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
|
||||
require.Equal(t, primitives.Gwei(0), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
|
||||
slot := primitives.Slot(2)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
|
||||
randao := [32]byte{}
|
||||
latestHash := [32]byte{1}
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
}
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
err := ProcessExecutionPayloadBid(state, block)
|
||||
require.ErrorContains(t, "self-build amount must be zero", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
|
||||
slot := primitives.Slot(8)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
proposerIdx := primitives.ValidatorIndex(2)
|
||||
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pub := sk.PublicKey().Marshal()
|
||||
var pubKey [48]byte
|
||||
copy(pubKey[:], pub)
|
||||
|
||||
balance := params.BeaconConfig().MinActivationBalance + 1_000_000
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 500_000,
|
||||
ExecutionPayment: 1,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: sig[:],
|
||||
}
|
||||
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx, // not self-build
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
require.NoError(t, ProcessExecutionPayloadBid(state, block))
|
||||
|
||||
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
|
||||
require.Equal(t, primitives.Gwei(500_000), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
|
||||
|
||||
require.NotNil(t, stateProto.LatestExecutionPayloadBid)
|
||||
require.Equal(t, primitives.BuilderIndex(1), stateProto.LatestExecutionPayloadBid.BuilderIndex)
|
||||
require.Equal(t, primitives.Gwei(500_000), stateProto.LatestExecutionPayloadBid.Value)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
|
||||
slot := primitives.Slot(4)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
proposerIdx := primitives.ValidatorIndex(2)
|
||||
randao := [32]byte(bytes.Repeat([]byte{0x01}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0x02}, 32))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
var pubKey [48]byte
|
||||
copy(pubKey[:], sk.PublicKey().Marshal())
|
||||
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
// Make builder inactive by setting withdrawable_epoch.
|
||||
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
|
||||
stateProto.Builders[int(builderIdx)].WithdrawableEpoch = 0
|
||||
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
|
||||
require.NoError(t, err)
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x04}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
signed := ðpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
err = ProcessExecutionPayloadBid(state, block)
|
||||
require.ErrorContains(t, "is not active", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
|
||||
slot := primitives.Slot(5)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
proposerIdx := primitives.ValidatorIndex(2)
|
||||
randao := [32]byte(bytes.Repeat([]byte{0x0A}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0x0B}, 32))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
var pubKey [48]byte
|
||||
copy(pubKey[:], sk.PublicKey().Marshal())
|
||||
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+10, randao, latestHash, pubKey)
|
||||
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
|
||||
// Add pending balances to push below required balance.
|
||||
stateProto.BuilderPendingWithdrawals = []*ethpb.BuilderPendingWithdrawal{
|
||||
{Amount: 15, BuilderIndex: builderIdx},
|
||||
}
|
||||
stateProto.BuilderPendingPayments = []*ethpb.BuilderPendingPayment{
|
||||
{Withdrawal: ðpb.BuilderPendingWithdrawal{Amount: 20, BuilderIndex: builderIdx}},
|
||||
}
|
||||
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
|
||||
require.NoError(t, err)
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 25,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
signed := ðpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
err = ProcessExecutionPayloadBid(state, block)
|
||||
require.ErrorContains(t, "cannot cover bid amount", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
|
||||
slot := primitives.Slot(6)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
proposerIdx := primitives.ValidatorIndex(2)
|
||||
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
var pubKey [48]byte
|
||||
copy(pubKey[:], sk.PublicKey().Marshal())
|
||||
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
    // Use an invalid signature.
    invalidSig := [96]byte{1}
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: invalidSig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "bid signature validation failed", err)
}

func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
    slot := primitives.Slot(10)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xAA}, 32),
        BlockHash:              bytes.Repeat([]byte{0xBB}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot + 1, // mismatch
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xDD}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "bid slot", err)
}

func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
    slot := primitives.Slot(11)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        bytes.Repeat([]byte{0x11}, 32), // mismatch
        ParentBlockRoot:        bytes.Repeat([]byte{0x22}, 32),
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "parent block hash mismatch", err)
}

func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
    slot := primitives.Slot(12)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    parentRoot := bytes.Repeat([]byte{0x22}, 32)
    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        parentRoot,
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bytes.Repeat([]byte{0x99}, 32)), // mismatch
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "parent block root mismatch", err)
}

func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
    slot := primitives.Slot(13)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0x22}, 32),
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             bytes.Repeat([]byte{0x01}, 32), // mismatch
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "prev randao mismatch", err)
}
@@ -8,6 +8,7 @@ go_library(
        "block.go",
        "genesis.go",
        "legacy.go",
        "log.go",
        "metrics.go",
        "randao.go",
        "ranges.go",

9
beacon-chain/core/helpers/log.go
Normal file
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package helpers

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/helpers")
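For context only: a generated package logger like the one above is normally consumed by attaching call-site fields to the shared entry. The function, field names and message below are illustrative and not part of this diff.

// Hypothetical call site inside the helpers package (illustrative sketch).
// The generated `log` entry already carries the "package" field, so callers
// only add their own structured fields before emitting a message.
func logActiveBalanceCacheMiss(epoch uint64) {
    log.WithField("epoch", epoch).Debug("total active balance cache miss")
}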
@@ -14,7 +14,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

var (

@@ -21,7 +21,6 @@ import (
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    log "github.com/sirupsen/logrus"
)

var (

@@ -3,6 +3,8 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "electra.go",
        "errors.go",
        "log.go",
        "skip_slot_cache.go",
        "state.go",
@@ -62,6 +64,8 @@ go_test(
        "altair_transition_no_verify_sig_test.go",
        "bellatrix_transition_no_verify_sig_test.go",
        "benchmarks_test.go",
        "electra_test.go",
        "exports_test.go",
        "skip_slot_cache_test.go",
        "state_fuzz_test.go",
        "state_test.go",

@@ -1,9 +1,10 @@
package electra
package transition

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -47,7 +48,7 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    // 6110 validations are in VerifyOperationLengths
@@ -63,59 +64,60 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
            return nil, errors.Wrap(err, "could not update total active balance cache")
        }
    }
    st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
        return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
    }
    st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attester slashing")
        return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
    }
    st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
    st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attestation")
        return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
    }
    if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
        return nil, errors.Wrap(err, "could not process altair deposit")
    if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
        return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
    }
    st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
        return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
    }
    st, err = ProcessBLSToExecutionChanges(st, block)
    st, err = blocks.ProcessBLSToExecutionChanges(st, block)
    if err != nil {
        return nil, errors.Wrap(err, "could not process bls-to-execution changes")
        return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
    }
    // new in electra
    requests, err := bb.ExecutionRequests()
    if err != nil {
        return nil, errors.Wrap(err, "could not get execution requests")
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
    }
    for _, d := range requests.Deposits {
        if d == nil {
            return nil, errors.New("nil deposit request")
            return nil, electra.NewExecReqError("nil deposit request")
        }
    }
    st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
    st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
    if err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
    }

    for _, w := range requests.Withdrawals {
        if w == nil {
            return nil, errors.New("nil withdrawal request")
            return nil, electra.NewExecReqError("nil withdrawal request")
        }
    }
    st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
    st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
    if err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
    }
    for _, c := range requests.Consolidations {
        if c == nil {
            return nil, errors.New("nil consolidation request")
            return nil, electra.NewExecReqError("nil consolidation request")
        }
    }
    if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
    if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
    }
    return st, nil
}
216
beacon-chain/core/transition/electra_test.go
Normal file
@@ -0,0 +1,216 @@
package transition_test

import (
    "context"
    "errors"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessOperationsWithNilRequests(t *testing.T) {
    tests := []struct {
        name      string
        modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
        errMsg    string
    }{
        {
            name: "Nil deposit request",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
            },
            errMsg: "nil deposit request",
        },
        {
            name: "Nil withdrawal request",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
            },
            errMsg: "nil withdrawal request",
        },
        {
            name: "Nil consolidation request",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
            },
            errMsg: "nil consolidation request",
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            st, ks := util.DeterministicGenesisStateElectra(t, 128)
            blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
            require.NoError(t, err)

            tc.modifyBlk(blk)

            b, err := blocks.NewSignedBeaconBlock(blk)
            require.NoError(t, err)

            require.NoError(t, st.SetSlot(1))

            _, err = transition.ElectraOperations(t.Context(), st, b.Block())
            require.ErrorContains(t, tc.errMsg, err)
        })
    }
}

func TestElectraOperations_ProcessingErrors(t *testing.T) {
    tests := []struct {
        name      string
        modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
        errCheck  func(t *testing.T, err error)
    }{
        {
            name: "ErrProcessProposerSlashingsFailed",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                // Create invalid proposer slashing with out-of-bounds proposer index
                blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
                    {
                        Header_1: &ethpb.SignedBeaconBlockHeader{
                            Header: &ethpb.BeaconBlockHeader{
                                Slot:          1,
                                ProposerIndex: 999999, // Invalid index (out of bounds)
                                ParentRoot:    make([]byte, 32),
                                StateRoot:     make([]byte, 32),
                                BodyRoot:      make([]byte, 32),
                            },
                            Signature: make([]byte, 96),
                        },
                        Header_2: &ethpb.SignedBeaconBlockHeader{
                            Header: &ethpb.BeaconBlockHeader{
                                Slot:          1,
                                ProposerIndex: 999999,
                                ParentRoot:    make([]byte, 32),
                                StateRoot:     make([]byte, 32),
                                BodyRoot:      make([]byte, 32),
                            },
                            Signature: make([]byte, 96),
                        },
                    },
                }
            },
            errCheck: func(t *testing.T, err error) {
                require.ErrorContains(t, "process proposer slashings failed", err)
                require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
            },
        },
        {
            name: "ErrProcessAttestationsFailed",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                // Create attestation with invalid committee index
                blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
                    {
                        AggregationBits: []byte{0b00000001},
                        Data: &ethpb.AttestationData{
                            Slot:            1,
                            CommitteeIndex:  999999, // Invalid committee index
                            BeaconBlockRoot: make([]byte, 32),
                            Source: &ethpb.Checkpoint{
                                Epoch: 0,
                                Root:  make([]byte, 32),
                            },
                            Target: &ethpb.Checkpoint{
                                Epoch: 0,
                                Root:  make([]byte, 32),
                            },
                        },
                        CommitteeBits: []byte{0b00000001},
                        Signature:     make([]byte, 96),
                    },
                }
            },
            errCheck: func(t *testing.T, err error) {
                require.ErrorContains(t, "process attestations failed", err)
                require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
            },
        },
        {
            name: "ErrProcessDepositsFailed",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                // Create deposit with invalid proof length
                blk.Block.Body.Deposits = []*ethpb.Deposit{
                    {
                        Proof: [][]byte{}, // Invalid: empty proof
                        Data: &ethpb.Deposit_Data{
                            PublicKey:             make([]byte, 48),
                            WithdrawalCredentials: make([]byte, 32),
                            Amount:                32000000000, // 32 ETH in Gwei
                            Signature:             make([]byte, 96),
                        },
                    },
                }
            },
            errCheck: func(t *testing.T, err error) {
                require.ErrorContains(t, "process deposits failed", err)
                require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
            },
        },
        {
            name: "ErrProcessVoluntaryExitsFailed",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                // Create voluntary exit with invalid validator index
                blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
                    {
                        Exit: &ethpb.VoluntaryExit{
                            Epoch:          0,
                            ValidatorIndex: 999999, // Invalid index (out of bounds)
                        },
                        Signature: make([]byte, 96),
                    },
                }
            },
            errCheck: func(t *testing.T, err error) {
                require.ErrorContains(t, "process voluntary exits failed", err)
                require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
            },
        },
        {
            name: "ErrProcessBLSChangesFailed",
            modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
                // Create BLS to execution change with invalid validator index
                blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
                    {
                        Message: &ethpb.BLSToExecutionChange{
                            ValidatorIndex:     999999, // Invalid index (out of bounds)
                            FromBlsPubkey:      make([]byte, 48),
                            ToExecutionAddress: make([]byte, 20),
                        },
                        Signature: make([]byte, 96),
                    },
                }
            },
            errCheck: func(t *testing.T, err error) {
                require.ErrorContains(t, "process BLS to execution changes failed", err)
                require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
            },
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            ctx := context.Background()
            st, ks := util.DeterministicGenesisStateElectra(t, 128)
            blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
            require.NoError(t, err)

            tc.modifyBlk(blk)

            b, err := blocks.NewSignedBeaconBlock(blk)
            require.NoError(t, err)

            require.NoError(t, st.SetSlot(primitives.Slot(1)))

            _, err = transition.ElectraOperations(ctx, st, b.Block())
            require.NotNil(t, err, "Expected an error but got nil")
            tc.errCheck(t, err)
        })
    }
}
19
beacon-chain/core/transition/errors.go
Normal file
@@ -0,0 +1,19 @@
package transition

import "errors"

var (
    ErrAttestationsSignatureInvalid          = errors.New("attestations signature invalid")
    ErrRandaoSignatureInvalid                = errors.New("randao signature invalid")
    ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
    ErrProcessWithdrawalsFailed              = errors.New("process withdrawals failed")
    ErrProcessRandaoFailed                   = errors.New("process randao failed")
    ErrProcessEth1DataFailed                 = errors.New("process eth1 data failed")
    ErrProcessProposerSlashingsFailed        = errors.New("process proposer slashings failed")
    ErrProcessAttesterSlashingsFailed        = errors.New("process attester slashings failed")
    ErrProcessAttestationsFailed             = errors.New("process attestations failed")
    ErrProcessDepositsFailed                 = errors.New("process deposits failed")
    ErrProcessVoluntaryExitsFailed           = errors.New("process voluntary exits failed")
    ErrProcessBLSChangesFailed               = errors.New("process BLS to execution changes failed")
    ErrProcessSyncAggregateFailed            = errors.New("process sync aggregate failed")
)
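The tests earlier in this diff rely on the wrapping above keeping each sentinel as the error cause, so callers can branch on these values with errors.Is. A minimal sketch of that pattern follows; the helper name is hypothetical and not part of this diff.

// Illustrative only: distinguish which block operation failed.
func operationFailureKind(err error) string {
    switch {
    case errors.Is(err, ErrProcessDepositsFailed):
        return "deposits"
    case errors.Is(err, ErrProcessAttestationsFailed):
        return "attestations"
    case errors.Is(err, ErrProcessVoluntaryExitsFailed):
        return "voluntary exits"
    default:
        return "other"
    }
}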
3
beacon-chain/core/transition/exports_test.go
Normal file
@@ -0,0 +1,3 @@
package transition

var ElectraOperations = electraOperations
@@ -1,10 +1,9 @@
// Package interop contains useful utilities for persisting
// ssz-encoded states and blocks to disk during each state
// transition for development purposes.
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package interop

import (
    "github.com/sirupsen/logrus"
)
import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "interop")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/transition/interop")

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package transition

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "state")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/transition")

@@ -7,12 +7,11 @@ import (

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
    b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
    v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/features"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -70,10 +69,11 @@ func ExecuteStateTransitionNoVerifyAnySig(
    }

    // Execute per block transition.
    set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
    sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
    if err != nil {
        return nil, nil, errors.Wrap(err, "could not process block")
    }
    set := sigSlice.Batch()

    // State root validation.
    postStateRoot, err := st.HashTreeRoot(ctx)
@@ -113,7 +113,7 @@
// assert block.state_root == hash_tree_root(state)
func CalculateStateRoot(
    ctx context.Context,
    state state.BeaconState,
    rollback state.BeaconState,
    signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
    ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
@@ -122,7 +122,7 @@ func CalculateStateRoot(
        tracing.AnnotateError(span, ctx.Err())
        return [32]byte{}, ctx.Err()
    }
    if state == nil || state.IsNil() {
    if rollback == nil || rollback.IsNil() {
        return [32]byte{}, errors.New("nil state")
    }
    if signed == nil || signed.IsNil() || signed.Block().IsNil() {
@@ -130,7 +130,7 @@
    }

    // Copy state to avoid mutating the state reference.
    state = state.Copy()
    state := rollback.Copy()

    // Execute per slots transition.
    var err error
@@ -141,12 +141,101 @@ func CalculateStateRoot(
    }

    // Execute per block transition.
    state, err = ProcessBlockForStateRoot(ctx, state, signed)
    if features.Get().EnableProposerPreprocessing {
        state, err = processBlockForProposing(ctx, state, signed)
        if err != nil {
            return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
        }
    } else {
        state, err = ProcessBlockForStateRoot(ctx, state, signed)
        if err != nil {
            return [32]byte{}, errors.Wrap(err, "could not process block")
        }
    }
    return state.HashTreeRoot(ctx)
}

// processBlockVerifySigs processes the block and verifies the signatures within it. Block signatures are not verified as this block is not yet signed.
func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
    var err error
    var set BlockSignatureBatches
    set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not process block")
        return nil, err
    }
    // We first try to verify all signatures batched optimistically. We ignore the block proposer signature.
    sigSet := set.Batch()
    valid, err := sigSet.Verify()
    if err != nil || valid {
        return st, err
    }
    // Some signature failed to verify.
    // Verify attestation signatures.
    attSigs := set.AttestationSignatures
    if attSigs == nil {
        return nil, ErrAttestationsSignatureInvalid
    }
    valid, err = attSigs.Verify()
    if err != nil {
        return nil, err
    }
    if !valid {
        return nil, ErrAttestationsSignatureInvalid
    }

    return state.HashTreeRoot(ctx)
    // Verify the randao signature.
    randaoSigs := set.RandaoSignatures
    if randaoSigs == nil {
        return nil, ErrRandaoSignatureInvalid
    }
    valid, err = randaoSigs.Verify()
    if err != nil {
        return nil, err
    }
    if !valid {
        return nil, ErrRandaoSignatureInvalid
    }

    if signed.Block().Version() < version.Capella {
        // This should not happen as we must have failed one of the above signatures.
        return st, nil
    }
    // Verify BLS to execution change signatures.
    blsChangeSigs := set.BLSChangeSignatures
    if blsChangeSigs == nil {
        return nil, ErrBLSToExecutionChangesSignatureInvalid
    }
    valid, err = blsChangeSigs.Verify()
    if err != nil {
        return nil, err
    }
    if !valid {
        return nil, ErrBLSToExecutionChangesSignatureInvalid
    }
    // We should not reach this point as one of the above signatures must have failed.
    return st, nil
}

// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
type BlockSignatureBatches struct {
    RandaoSignatures      *bls.SignatureBatch
    AttestationSignatures *bls.SignatureBatch
    BLSChangeSignatures   *bls.SignatureBatch
}

// Batch returns the batch of signature batches in the BlockSignatureBatches.
func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
    sigs := bls.NewSet()
    if b.RandaoSignatures != nil {
        sigs.Join(b.RandaoSignatures)
    }
    if b.AttestationSignatures != nil {
        sigs.Join(b.AttestationSignatures)
    }
    if b.BLSChangeSignatures != nil {
        sigs.Join(b.BLSChangeSignatures)
    }
    return sigs
}

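As the updated tests later in this diff show, a caller typically collapses the per-category batches back into a single set and verifies it in one batched operation. A minimal sketch of that pattern, with a hypothetical function name:

// Illustrative only: verify all non-proposer block signatures via the merged batch.
func verifyBlockBatches(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) error {
    batches, _, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
    if err != nil {
        return err
    }
    // Merge the randao, attestation and BLS-change batches and verify them together.
    valid, err := batches.Batch().Verify()
    if err != nil {
        return err
    }
    if !valid {
        return errors.New("block signature batch failed to verify")
    }
    return nil
}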
// ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation
@@ -165,48 +254,48 @@ func ProcessBlockNoVerifyAnySig(
    ctx context.Context,
    st state.BeaconState,
    signed interfaces.ReadOnlySignedBeaconBlock,
) (*bls.SignatureBatch, state.BeaconState, error) {
) (BlockSignatureBatches, state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
    defer span.End()
    set := BlockSignatureBatches{}
    if err := blocks.BeaconBlockIsNil(signed); err != nil {
        return nil, nil, err
        return set, nil, err
    }

    if st.Version() != signed.Block().Version() {
        return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
        return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
    }

    blk := signed.Block()
    st, err := ProcessBlockForStateRoot(ctx, st, signed)
    if err != nil {
        return nil, nil, err
        return set, nil, err
    }

    randaoReveal := signed.Block().Body().RandaoReveal()
    rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
    if err != nil {
        tracing.AnnotateError(span, err)
        return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
        return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
    }
    set.RandaoSignatures = rSet
    aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
    if err != nil {
        return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
        return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
    }
    set.AttestationSignatures = aSet

    // Merge beacon block, randao and attestations signatures into a set.
    set := bls.NewSet()
    set.Join(rSet).Join(aSet)

    if blk.Version() >= version.Capella {
        changes, err := signed.Block().Body().BLSToExecutionChanges()
        if err != nil {
            return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
            return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
        }
        cSet, err := b.BLSChangesSignatureBatch(st, changes)
        if err != nil {
            return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
            return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
        }
        set.Join(cSet)
        set.BLSChangeSignatures = cSet
    }
    return set, st, nil
}
@@ -268,7 +357,7 @@ func ProcessOperationsNoVerifyAttsSigs(
        return nil, err
    }
    } else {
        state, err = electra.ProcessOperations(ctx, state, beaconBlock)
        state, err = electraOperations(ctx, state, beaconBlock)
        if err != nil {
            return nil, err
        }
@@ -326,7 +415,7 @@ func ProcessBlockForStateRoot(
    if state.Version() >= version.Capella {
        state, err = b.ProcessWithdrawals(state, executionData)
        if err != nil {
            return nil, errors.Wrap(err, "could not process withdrawals")
            return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
        }
    }
    if err = b.ProcessPayload(state, blk.Body()); err != nil {
@@ -338,13 +427,13 @@ func ProcessBlockForStateRoot(
    state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
    if err != nil {
        tracing.AnnotateError(span, err)
        return nil, errors.Wrap(err, "could not verify and process randao")
        return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
    }

    state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
    if err != nil {
        tracing.AnnotateError(span, err)
        return nil, errors.Wrap(err, "could not process eth1 data")
        return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
    }

    state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
@@ -363,7 +452,7 @@ func ProcessBlockForStateRoot(
    }
    state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
    if err != nil {
        return nil, errors.Wrap(err, "process_sync_aggregate failed")
        return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
    }

    return state, nil
@@ -379,31 +468,35 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
    exitInfo := &validators.ExitInfo{}
    if hasSlashings || hasExits {
        // ExitInformation is expensive to compute, only do it if we need it.
        exitInfo = v.ExitInformation(st)
        exitInfo = validators.ExitInformation(st)
        if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
            return nil, errors.Wrap(err, "could not update total active balance cache")
        }
    }
    st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
        return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
    }
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attester slashing")
        return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
    }
    st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attestation")
        return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
    }
    if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
        return nil, errors.Wrap(err, "could not process altair deposit")
        return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
    }
    st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
        return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
    }
    return b.ProcessBLSToExecutionChanges(st, beaconBlock)
    st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
    if err != nil {
        return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
    }
    return st, nil
}

// This calls phase 0 block operations.
@@ -411,32 +504,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
    var err error
    hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
    hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
    var exitInfo *v.ExitInfo
    var exitInfo *validators.ExitInfo
    if hasSlashings || hasExits {
        // ExitInformation is expensive to compute, only do it if we need it.
        exitInfo = v.ExitInformation(st)
        exitInfo = validators.ExitInformation(st)
        if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
            return nil, errors.Wrap(err, "could not update total active balance cache")
        }
    }
    st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process block proposer slashings")
        return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
    }
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process block attester slashings")
        return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
    }
    st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
    if err != nil {
        return nil, errors.Wrap(err, "could not process block attestations")
        return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
    }
    if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
        return nil, errors.Wrap(err, "could not process deposits")
        return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
    }
    st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
        return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
    }
    return st, nil
}

@@ -132,7 +132,8 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
    set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
    require.NoError(t, err)
    // Test Signature set verifies.
    verified, err := set.Verify()
    sigSet := set.Batch()
    verified, err := sigSet.Verify()
    require.NoError(t, err)
    assert.Equal(t, true, verified, "Could not verify signature set.")
}
@@ -145,7 +146,8 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
    require.NoError(t, err)
    set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
    require.NoError(t, err)
    verified, err := set.Verify()
    sigSet := set.Batch()
    verified, err := sigSet.Verify()
    require.NoError(t, err)
    require.Equal(t, true, verified, "Could not verify signature set")
}
@@ -154,8 +156,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
    beaconState, block, _, _, _ := createFullBlockWithOperations(t)
    wsb, err := blocks.NewSignedBeaconBlock(block)
    require.NoError(t, err)
    set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
    signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
    require.NoError(t, err)
    set := signatures.Batch()
    assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
    assert.Equal(t, "randao signature", set.Descriptions[0])
    assert.Equal(t, "attestation signature", set.Descriptions[1])

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package das

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "das")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/das")

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package filesystem

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "filesystem")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/db/filesystem")

@@ -89,6 +89,7 @@ type NoHeadAccessDatabase interface {
    SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
    SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
    SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
    SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
    // State related methods.
    SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
    SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
@@ -96,6 +97,7 @@ type NoHeadAccessDatabase interface {
    DeleteStates(ctx context.Context, blockRoots [][32]byte) error
    SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
    SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
    SlotInDiffTree(primitives.Slot) (uint64, int, error)
    // Checkpoint operations.
    SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
    SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error

@@ -32,6 +32,7 @@ go_library(
        "state_diff_helpers.go",
        "state_summary.go",
        "state_summary_cache.go",
        "testing_helpers.go",
        "utils.go",
        "validated_checkpoint.go",
        "wss.go",

@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package kv

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "db")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/db/kv")

@@ -1053,6 +1053,10 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
        return nil, err
    }

    if uint64(slot) < s.getOffset() {
        return nil, ErrSlotBeforeOffset
    }

    st, err := s.stateByDiff(ctx, slot)
    if err != nil {
        return nil, err
@@ -1070,6 +1074,10 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
        return false, err
    }

    if uint64(slot) < s.getOffset() {
        return false, ErrSlotBeforeOffset
    }

    stateLvl := computeLevel(s.getOffset(), slot)
    return stateLvl != -1, nil
}

Some files were not shown because too many files have changed in this diff.