Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: fixE2E...Stale_PR_c (119 commits)
Commits in this range (abbreviated SHA1):

f265eb7a84, 6562036c54, b8aad84285, 5f0d6074d6, 9d6a2f5390, 490ddbf782, adc875b20d, 8cd249c1c8,
305d5850e7, df3a9f218d, ae451a3a02, 17561a6576, b842b7ea01, 9bbe12e28c, 0674cf64cc, 3413d05b34,
070a765d24, 8ac1647436, dfe31c9242, b7866be3a9, 8413660d5f, e037491756, ea2624b5ab, 1b40f941cf,
57830435d7, 44d850de51, b08e691127, 968e82b02d, de04ce8329, 5efecff631, 3ab759e163, 836d369c6c,
568273453b, 7a4ecb6060, 82f0ea5b11, 6fddd13cb2, 43c7659d18, 2d15e53dab, 2f2152e039, 2542189efc,
8e6d39a44b, c35889d4c6, 10dedd5ced, d2966a4c5b, 62b5c43d87, b04baa93cd, 2e84208169, 2265af58ae,
4d190c41cc, 0fbb27d8e3, 3df3e84270, 30cc23c5de, 9befb6bd06, 8a12b78684, 46168607e8, 1272b9e186,
fcbe19445a, 2b4dffa87d, 49a6d02e12, 2b06dfd4a3, 6e81b4e84b, 0de1282e1c, e3db52ca1f, c5a36d4c70,
e28b6695ba, de177f74fb, e4310aef73, d71079e1d8, c08d2f36b0, 839a80e339, a35535043e, 323dd7b22d,
102128ca2e, f3dd75a2c4, 0869814a0e, 41edee9fe9, 2fa3694746, e9606b3635, ed7c4bb6a7, c93fea4ec4,
aa847991e0, 5f1b903bdf, 49f3531aed, 9b2934f1f6, 26355768a0, 80bff0dc2d, c312a88aa3, 625818d556,
2c5a2e8ec7, ae16d5f52c, d69be8a766, 8df62a537b, bf5e667351, 0e5c2bd18e, 3233e64ace, 751117a308,
8d9024f01f, a9862f32f3, c8d6f47749, a6f134e48e, fdbb5136d9, 2c66918594, a0dac292ff, 75857e7177,
0369f70b0b, 276b97530f, d5daf49a9a, feb16ae4aa, 219301339c, aec349f75a, 5f909caedf, ba6dff3adb,
8cd05f098b, 425f5387fa, f2ce115ade, 090a3e1ded, c0acb7d352, 0d6070e6fc, bd00f851f0
@@ -12,8 +12,7 @@
build:remote-cache --remote_download_minimal
build:remote-cache --remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
.github/workflows/stale_pr_checker.yml (new file, 27 lines, vendored)
@@ -0,0 +1,27 @@
name: Find stale PRs

on:
  schedule:
    - cron: '0 13 * * 1'

jobs:
  fetch-PRs:
    runs-on: ubuntu-latest
    steps:
      - name: Fetch pull requests from here
        id: local
        uses: paritytech/stale-pr-finder@main
        with:
          GITHUB_TOKEN: ${{ github.token }}
          repo: prysm
      - name: Post to a Slack channel
        id: slack
        uses: slackapi/slack-github-action@v1.23.0
        with:
          channel-id: ${{ secrets.CHANNEL }}
          slack-message: |
            Stale PRs this week:
            ${{ steps.local.outputs.message }}
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
          LOCAL_PR: ${{ steps.local.outputs.message }}
@@ -224,6 +224,7 @@ nogo(
        "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
        # fieldalignment disabled
        #"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
MODULE.bazel.lock (generated, 21 lines changed)
@@ -1123,6 +1123,27 @@
|
||||
"recordedRepoMappingEntries": []
|
||||
}
|
||||
},
|
||||
"@@bazel_tools//tools/test:extensions.bzl%remote_coverage_tools_extension": {
|
||||
"general": {
|
||||
"bzlTransitiveDigest": "l5mcjH2gWmbmIycx97bzI2stD0Q0M5gpDc0aLOHKIm8=",
|
||||
"recordedFileInputs": {},
|
||||
"recordedDirentsInputs": {},
|
||||
"envVariables": {},
|
||||
"generatedRepoSpecs": {
|
||||
"remote_coverage_tools": {
|
||||
"bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl",
|
||||
"ruleClassName": "http_archive",
|
||||
"attributes": {
|
||||
"sha256": "7006375f6756819b7013ca875eab70a541cf7d89142d9c511ed78ea4fefa38af",
|
||||
"urls": [
|
||||
"https://mirror.bazel.build/bazel_coverage_output_generator/releases/coverage_output_generator-v2.6.zip"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"recordedRepoMappingEntries": []
|
||||
}
|
||||
},
|
||||
"@@rules_java~//java:extensions.bzl%toolchains": {
|
||||
"general": {
|
||||
"bzlTransitiveDigest": "tJHbmWnq7m+9eUBnUdv7jZziQ26FmcGL9C5/hU3Q9UQ=",
|
||||
|
||||
WORKSPACE (28 lines changed)
@@ -29,23 +29,7 @@ http_archive(
|
||||
|
||||
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
|
||||
|
||||
# Temporarily use a nightly build until 0.12.0 is released.
|
||||
# See: https://github.com/prysmaticlabs/prysm/issues/13130
|
||||
zig_toolchains(
|
||||
host_platform_sha256 = {
|
||||
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
|
||||
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
|
||||
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
|
||||
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
|
||||
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
|
||||
},
|
||||
url_formats = [
|
||||
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
],
|
||||
version = "0.12.0-dev.1349+fa022d1ec",
|
||||
)
|
||||
zig_toolchains()
|
||||
|
||||
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
|
||||
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
|
||||
@@ -243,7 +227,7 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.4.0"
|
||||
consensus_spec_version = "v1.5.0-alpha.2"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -259,7 +243,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
|
||||
integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -275,7 +259,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
|
||||
integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -291,7 +275,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
|
||||
integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -306,7 +290,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
|
||||
integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -1,11 +1,24 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"constants.go",
|
||||
"headers.go",
|
||||
"jwt.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//crypto/rand:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["jwt_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -36,20 +36,25 @@ func (n *NodeHealthTracker) IsHealthy() bool {
}

func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
	n.RLock()
	n.Lock()
	defer n.Unlock()

	newStatus := n.node.IsHealthy(ctx)
	if n.isHealthy == nil {
		n.isHealthy = &newStatus
	}
	isStatusChanged := newStatus != *n.isHealthy
	n.RUnlock()

	isStatusChanged := newStatus != *n.isHealthy
	if isStatusChanged {
		n.Lock()
		// Double-check the condition to ensure it hasn't changed since the first check.
		// Update the health status
		n.isHealthy = &newStatus
		n.Unlock() // It's better to unlock as soon as the protected section is over.
		n.healthChan <- newStatus
		// Send the new status to the health channel, potentially overwriting the existing value
		select {
		case <-n.healthChan:
			n.healthChan <- newStatus
		default:
			n.healthChan <- newStatus
		}
	}
	return newStatus
}

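The CheckHealth rewrite above moves the status check under a single write lock and makes the channel update non-blocking. A minimal sketch of that pattern, with hypothetical names rather than the actual Prysm types, looks like this:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// tracker caches the last health status and publishes transitions on a
// 1-buffered channel so that CheckHealth never blocks on a slow consumer.
type tracker struct {
	sync.Mutex
	isHealthy  *bool
	healthChan chan bool
	probe      func(context.Context) bool
}

func newTracker(probe func(context.Context) bool) *tracker {
	return &tracker{probe: probe, healthChan: make(chan bool, 1)}
}

func (t *tracker) CheckHealth(ctx context.Context) bool {
	t.Lock()
	defer t.Unlock()

	newStatus := t.probe(ctx)
	if t.isHealthy == nil {
		t.isHealthy = &newStatus
	}
	if newStatus != *t.isHealthy {
		t.isHealthy = &newStatus
		// Drain any stale value first so the send below can never block.
		select {
		case <-t.healthChan:
		default:
		}
		t.healthChan <- newStatus
	}
	return newStatus
}

func main() {
	up := true
	t := newTracker(func(context.Context) bool { return up })
	fmt.Println(t.CheckHealth(context.Background())) // true (initial status, no transition)
	up = false
	fmt.Println(t.CheckHealth(context.Background())) // false (transition published)
	fmt.Println(<-t.healthChan)                      // false
}
```

The buffered channel plus drain-then-send keeps only the most recent transition, which is the same effect the select in the diff above achieves.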
@@ -87,21 +87,15 @@ func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
// Number of goroutines to spawn for both reading and writing
|
||||
numGoroutines := 6
|
||||
|
||||
go func() {
|
||||
for range n.HealthUpdates() {
|
||||
// Consume values to avoid blocking on channel send.
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(numGoroutines * 2) // for readers and writers
|
||||
|
||||
// Concurrently update health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package testing
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
|
||||
"go.uber.org/mock/gomock"
|
||||
@@ -16,6 +17,7 @@ var (
|
||||
type MockHealthClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockHealthClientMockRecorder
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
|
||||
@@ -25,6 +27,8 @@ type MockHealthClientMockRecorder struct {
|
||||
|
||||
// IsHealthy mocks base method.
|
||||
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsHealthy", arg0)
|
||||
ret0, ok := ret[0].(bool)
|
||||
@@ -41,6 +45,8 @@ func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
|
||||
|
||||
// IsHealthy indicates an expected call of IsHealthy.
|
||||
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
|
||||
mr.mock.Lock()
|
||||
defer mr.mock.Unlock()
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
@@ -28,6 +29,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -49,6 +51,7 @@ go_test(
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
@@ -22,7 +23,7 @@ type SignedBid interface {
|
||||
type Bid interface {
|
||||
Header() (interfaces.ExecutionData, error)
|
||||
BlobKzgCommitments() ([][]byte, error)
|
||||
Value() []byte
|
||||
Value() primitives.Wei
|
||||
Pubkey() []byte
|
||||
Version() int
|
||||
IsNil() bool
|
||||
@@ -125,8 +126,8 @@ func (b builderBid) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBid) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBid) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -165,7 +166,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
|
||||
// Header returns the execution data interface.
|
||||
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
|
||||
// We have to convert big endian to little endian because the value is coming from the execution layer.
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
@@ -179,8 +180,8 @@ func (b builderBidCapella) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBidCapella) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBidCapella) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -222,8 +223,8 @@ func (b builderBidDeneb) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBidDeneb) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBidDeneb) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -249,7 +250,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
|
||||
// Header --
|
||||
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
|
||||
// We have to convert big endian to little endian because the value is coming from the execution layer.
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
|
||||
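The Bid interface now returns the bid value as primitives.Wei instead of raw bytes. The builder API carries the value as little-endian SSZ bytes, so turning it into a big-endian big.Int means reversing the byte order. A self-contained sketch of that conversion (the helper name here is illustrative, not Prysm's LittleEndianBytesToWei):

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndianToBigInt reverses a little-endian SSZ byte slice into the
// big-endian order that big.Int.SetBytes expects.
func littleEndianToBigInt(le []byte) *big.Int {
	be := make([]byte, len(le))
	for i := range le {
		be[len(le)-1-i] = le[i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// 1 ETH = 10^18 wei, serialized as 32 little-endian bytes like a builder bid value.
	wei := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	be := wei.FillBytes(make([]byte, 32))
	le := make([]byte, 32)
	for i := range be {
		le[31-i] = be[i]
	}
	fmt.Println(littleEndianToBigInt(le)) // 1000000000000000000
}
```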
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -14,11 +13,11 @@ import (
|
||||
"text/template"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -282,133 +281,68 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
return err
|
||||
}
|
||||
|
||||
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
|
||||
|
||||
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
|
||||
// The response is the full execution payload used to create the blinded block.
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
if !sb.IsBlinded() {
|
||||
return nil, nil, errNotBlinded
|
||||
}
|
||||
switch sb.Version() {
|
||||
case version.Bellatrix:
|
||||
psb, err := sb.PbBlindedBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(ðpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
|
||||
}
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
r.Header.Set("Accept", "application/json")
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
|
||||
return nil, nil, errors.New("not a bellatrix payload")
|
||||
}
|
||||
p, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayload(p)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, nil, nil
|
||||
case version.Capella:
|
||||
psb, err := sb.PbBlindedCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(ðpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
|
||||
}
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
r.Header.Set("Accept", "application/json")
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponseCapella{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Capella) {
|
||||
return nil, nil, errors.New("not a capella payload")
|
||||
}
|
||||
p, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, nil, nil
|
||||
case version.Deneb:
|
||||
psb, err := sb.PbBlindedDenebBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(ðpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
r.Header.Set("Accept", "application/json")
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponseDeneb{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
|
||||
return nil, nil, errors.New("not a deneb payload")
|
||||
}
|
||||
p, blobBundle, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, blobBundle, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
|
||||
// massage the proto struct type data into the api response type.
|
||||
mj, err := structs.SignedBeaconBlockMessageJsoner(sb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error generating blinded beacon block post request")
|
||||
}
|
||||
|
||||
body, err := json.Marshal(mj)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error marshaling blinded block post request to json")
|
||||
}
|
||||
postOpts := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(sb.Version()))
|
||||
r.Header.Set("Content-Type", api.JsonMediaType)
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
|
||||
// blobs bundle if it is post deneb.
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
|
||||
}
|
||||
// ExecutionPayloadResponse parses just the outer container and the Value key, enabling it to use the .Value
|
||||
// key to determine which underlying data type to use to finish the unmarshaling.
|
||||
ep := &ExecutionPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder ExecutionPayloadResponse")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(sb.Version()) {
|
||||
return nil, nil, errors.Wrapf(errResponseVersionMismatch, "req=%s, recv=%s", strings.ToLower(ep.Version), version.String(sb.Version()))
|
||||
}
|
||||
// This parses the rest of the response and returns the inner data field.
|
||||
pp, err := ep.ParsePayload()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse execution payload from builder with version=%s", ep.Version)
|
||||
}
|
||||
// Get the payload as a proto.Message so it can be wrapped as an execution payload interface.
|
||||
pb, err := pp.PayloadProto()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(pb)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
bb, ok := pp.(BlobBundler)
|
||||
if ok {
|
||||
bbpb, err := bb.BundleProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from builder response with version=%s", ep.Version)
|
||||
}
|
||||
return ed, bbpb, nil
|
||||
}
|
||||
return ed, nil, nil
|
||||
}
|
||||
|
||||
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
|
||||
|
||||
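The rewritten SubmitBlindedBlock replaces the per-fork switch with a generic envelope: it decodes only the version field first, then picks the concrete payload type from it. A hedged sketch of that two-stage JSON decode (the field and type names below are illustrative, not the exact builder API schema):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the idea behind ExecutionPayloadResponse: parse only the
// version up front and keep the payload raw until the version is known.
type envelope struct {
	Version string          `json:"version"`
	Data    json.RawMessage `json:"data"`
}

type capellaPayload struct {
	BlockHash string `json:"block_hash"`
}

type denebPayload struct {
	ExecutionPayload capellaPayload `json:"execution_payload"`
	BlobsBundle      struct {
		Blobs []string `json:"blobs"`
	} `json:"blobs_bundle"`
}

func parsePayload(raw []byte) (any, error) {
	var e envelope
	if err := json.Unmarshal(raw, &e); err != nil {
		return nil, err
	}
	switch e.Version {
	case "capella":
		p := &capellaPayload{}
		return p, json.Unmarshal(e.Data, p)
	case "deneb":
		p := &denebPayload{}
		return p, json.Unmarshal(e.Data, p)
	default:
		return nil, fmt.Errorf("unsupported version %q", e.Version)
	}
}

func main() {
	resp := []byte(`{"version":"deneb","data":{"execution_payload":{"block_hash":"0x01"},"blobs_bundle":{"blobs":[]}}}`)
	p, err := parsePayload(resp)
	fmt.Printf("%#v %v\n", p, err)
}
```

Keeping the data field as json.RawMessage is what lets one request path serve Bellatrix, Capella, and Deneb without duplicating the HTTP plumbing for each fork.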
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
@@ -198,12 +198,12 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
|
||||
require.Equal(t, uint64(1), bidHeader.GasUsed())
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
// this matches the value in the testExampleHeaderResponse
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -230,12 +230,11 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -262,12 +261,13 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
@@ -432,7 +432,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
_, _, err = c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.ErrorContains(t, "not a bellatrix payload", err)
|
||||
require.ErrorIs(t, err, errResponseVersionMismatch)
|
||||
})
|
||||
t.Run("not blinded", func(t *testing.T) {
|
||||
sbb, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlockBellatrix{Block: ð.BeaconBlockBellatrix{Body: ð.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})
|
||||
|
||||
@@ -9,11 +9,15 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var errInvalidUint256 = errors.New("invalid Uint256")
|
||||
@@ -44,6 +48,9 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
|
||||
|
||||
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
|
||||
func (s Uint256) SSZBytes() []byte {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
}
|
||||
if !math.IsValidUint256(s.Int) {
|
||||
return []byte{}
|
||||
}
|
||||
@@ -91,6 +98,9 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
|
||||
|
||||
// MarshalText returns a text byte representation of Uint256.
|
||||
func (s Uint256) MarshalText() ([]byte, error) {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
}
|
||||
if !math.IsValidUint256(s.Int) {
|
||||
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
|
||||
}
|
||||
@@ -146,6 +156,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
}
|
||||
return ð.BuilderBid{
|
||||
Header: header,
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bb.Value.SSZBytes(),
|
||||
Pubkey: bb.Pubkey,
|
||||
}, nil
|
||||
@@ -265,6 +277,11 @@ func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (r *ExecutionPayload) PayloadProto() (proto.Message, error) {
|
||||
pb, err := r.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayload Proto
|
||||
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
@@ -396,6 +413,51 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
|
||||
|
||||
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
|
||||
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
|
||||
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
|
||||
pb := ed.Proto()
|
||||
var data interface{}
|
||||
var err error
|
||||
var ver string
|
||||
switch pbStruct := pb.(type) {
|
||||
case *v1.ExecutionPayload:
|
||||
ver = version.String(version.Bellatrix)
|
||||
data, err = FromProto(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadCapella:
|
||||
ver = version.String(version.Capella)
|
||||
data, err = FromProtoCapella(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadDeneb:
|
||||
ver = version.String(version.Deneb)
|
||||
payloadStruct, err := FromProtoDeneb(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
|
||||
}
|
||||
data = &ExecutionPayloadDenebAndBlobsBundle{
|
||||
ExecutionPayload: &payloadStruct,
|
||||
BlobsBundle: FromBundleProto(bundle),
|
||||
}
|
||||
default:
|
||||
return nil, errInvalidTypeConversion
|
||||
}
|
||||
encoded, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
|
||||
}
|
||||
return &ExecutionPayloadResponse{
|
||||
Version: ver,
|
||||
Data: encoded,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Data struct {
|
||||
@@ -424,6 +486,8 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
|
||||
}
|
||||
return ð.BuilderBidCapella{
|
||||
Header: header,
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
@@ -523,6 +587,42 @@ type ExecPayloadResponseCapella struct {
|
||||
Data ExecutionPayloadCapella `json:"data"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadResponse allows for unmarshaling just the Version field of the payload.
|
||||
// This allows it to return different ExecutionPayload types based on the version field.
|
||||
type ExecutionPayloadResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
// ParsedPayload can retrieve the underlying protobuf message for the given execution payload response.
|
||||
type ParsedPayload interface {
|
||||
PayloadProto() (proto.Message, error)
|
||||
}
|
||||
|
||||
// BlobBundler can retrieve the underlying blob bundle protobuf message for the given execution payload response.
|
||||
type BlobBundler interface {
|
||||
BundleProto() (*v1.BlobsBundle, error)
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
|
||||
var toProto ParsedPayload
|
||||
switch r.Version {
|
||||
case version.String(version.Bellatrix):
|
||||
toProto = &ExecutionPayload{}
|
||||
case version.String(version.Capella):
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
case version.String(version.Deneb):
|
||||
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
|
||||
default:
|
||||
return nil, consensusblocks.ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(r.Data, toProto); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal the response .Data field with the stated version schema")
|
||||
}
|
||||
return toProto, nil
|
||||
}
|
||||
|
||||
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
@@ -547,6 +647,11 @@ func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, err
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (p *ExecutionPayloadCapella) PayloadProto() (proto.Message, error) {
|
||||
pb, err := p.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayloadCapella Proto.
|
||||
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
@@ -921,8 +1026,10 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
|
||||
return ð.BuilderBidDeneb{
|
||||
Header: header,
|
||||
BlobKzgCommitments: kzgCommitments,
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1128,6 +1235,12 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
|
||||
if r.Data == nil {
|
||||
return nil, nil, errors.New("data field in response is empty")
|
||||
}
|
||||
if r.Data.ExecutionPayload == nil {
|
||||
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
|
||||
}
|
||||
if r.Data.BlobsBundle == nil {
|
||||
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
|
||||
}
|
||||
payload, err := r.Data.ExecutionPayload.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -1139,8 +1252,26 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
|
||||
return payload, bundle, nil
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadDenebAndBlobsBundle) PayloadProto() (proto.Message, error) {
|
||||
if r.ExecutionPayload == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload in combined deneb payload")
|
||||
}
|
||||
pb, err := r.ExecutionPayload.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadDenebAndBlobsBundle) BundleProto() (*v1.BlobsBundle, error) {
|
||||
if r.BlobsBundle == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
|
||||
}
|
||||
return r.BlobsBundle.ToProto()
|
||||
}
|
||||
|
||||
// ToProto returns the ExecutionPayloadDeneb Proto.
|
||||
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
|
||||
if p == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
|
||||
}
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
|
||||
|
||||
@@ -15,9 +15,11 @@ import (
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -1600,7 +1602,6 @@ func TestBuilderBidUnmarshalUint256(t *testing.T) {
|
||||
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
|
||||
r := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
|
||||
//require.Equal(t, expectedValue, r.Data.Message.Value)
|
||||
marshaled := r.Data.Message.Value.String()
|
||||
require.Equal(t, base10, marshaled)
|
||||
require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
|
||||
@@ -1907,3 +1908,40 @@ func TestErrorMessage_non200Err(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyResponseBody(t *testing.T) {
|
||||
t.Run("empty buffer", func(t *testing.T) {
|
||||
var b []byte
|
||||
r := &ExecutionPayloadResponse{}
|
||||
err := json.Unmarshal(b, r)
|
||||
_, ok := err.(*json.SyntaxError)
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
t.Run("empty object", func(t *testing.T) {
|
||||
empty := []byte("{}")
|
||||
emptyResponse := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(empty, emptyResponse))
|
||||
_, err := emptyResponse.ParsePayload()
|
||||
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
|
||||
})
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
|
||||
for i := range versions {
|
||||
vstr := version.String(versions[i])
|
||||
t.Run("populated version without payload"+vstr, func(t *testing.T) {
|
||||
in := &ExecutionPayloadResponse{Version: vstr}
|
||||
encoded, err := json.Marshal(in)
|
||||
require.NoError(t, err)
|
||||
epr := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(encoded, epr))
|
||||
pp, err := epr.ParsePayload()
|
||||
require.NoError(t, err)
|
||||
pb, err := pp.PayloadProto()
|
||||
if err == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, pb == nil)
|
||||
} else {
|
||||
require.ErrorIs(t, err, consensusblocks.ErrNilObject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,4 +4,6 @@ const (
|
||||
WebUrlPrefix = "/v2/validator/"
|
||||
WebApiUrlPrefix = "/api/v2/validator/"
|
||||
KeymanagerApiPrefix = "/eth/v1"
|
||||
SystemLogsPrefix = "health/logs"
|
||||
AuthTokenFileName = "auth-token"
|
||||
)
|
||||
|
||||
@@ -14,7 +14,7 @@ go_library(
|
||||
"//validator:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/middleware:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
@@ -104,7 +104,7 @@ func (g *Gateway) Start() {
|
||||
}
|
||||
}
|
||||
|
||||
corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
|
||||
corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
|
||||
|
||||
if g.cfg.muxHandler != nil {
|
||||
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
api/jwt.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package api

import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)

// GenerateRandomHexString generates a random hex string that follows the standards for jwt token
// used for beacon node -> execution client
// used for web client -> validator client
func GenerateRandomHexString() (string, error) {
	secret := make([]byte, 32)
	randGen := rand.NewGenerator()
	n, err := randGen.Read(secret)
	if err != nil {
		return "", err
	} else if n != 32 {
		return "", errors.New("rand: unexpected length")
	}
	return hexutil.Encode(secret), nil
}

// ValidateAuthToken validating auth token for web
func ValidateAuthToken(token string) error {
	b, err := hexutil.Decode(token)
	// token should be hex-encoded and at least 256 bits
	if err != nil || len(b) < 32 {
		return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
	}
	return nil
}
api/jwt_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package api

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestGenerateRandomHexString(t *testing.T) {
	token, err := GenerateRandomHexString()
	require.NoError(t, err)
	require.NoError(t, ValidateAuthToken(token))
}
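Assuming the api package shown above, a short usage sketch of the new helpers (the printed length reflects a 0x-prefixed, 32-byte hex string):

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api"
)

func main() {
	// 32 random bytes, hex-encoded with a 0x prefix: 66 characters total.
	token, err := api.GenerateRandomHexString()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(token)) // 66

	fmt.Println(api.ValidateAuthToken(token))               // <nil>
	fmt.Println(api.ValidateAuthToken("0xdeadbeef") != nil) // true: shorter than 256 bits
}
```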
@@ -2,29 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"error.go",
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
srcs = ["error.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"error_test.go",
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
srcs = ["error_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
deps = ["//testing/assert:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// CorsHandler sets the cors settings on api endpoints
|
||||
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
|
||||
return c.Handler
|
||||
}
|
||||
api/server/middleware/BUILD.bazel (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
api/server/middleware/middleware.go (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// CorsHandler sets the cors settings on api endpoints
|
||||
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
|
||||
return c.Handler
|
||||
}
|
||||
|
||||
// ContentTypeHandler checks request for the appropriate media types otherwise returning a http.StatusUnsupportedMediaType error
|
||||
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// skip the GET request
|
||||
if r.Method == http.MethodGet {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
|
||||
return
|
||||
}
|
||||
|
||||
accepted := false
|
||||
for _, acceptedType := range acceptedMediaTypes {
|
||||
if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !accepted {
|
||||
http.Error(w, fmt.Sprintf("Unsupported media type: %s", contentType), http.StatusUnsupportedMediaType)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// AcceptHeaderHandler checks if the client's response preference is handled
|
||||
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
// header is optional and should skip if not provided
|
||||
if acceptHeader == "" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
accepted := false
|
||||
acceptTypes := strings.Split(acceptHeader, ",")
|
||||
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
|
||||
for _, acceptType := range acceptTypes {
|
||||
acceptType = strings.TrimSpace(acceptType)
|
||||
if acceptType == "*/*" {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
for _, serverAcceptedType := range serverAcceptedTypes {
|
||||
if strings.HasPrefix(acceptType, serverAcceptedType) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if accepted {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !accepted {
|
||||
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
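Each of the handlers above returns (or is) a mux.MiddlewareFunc, so they chain directly onto a gorilla/mux router. A hedged wiring sketch (routes, origins, and media types here are illustrative):

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)

func main() {
	r := mux.NewRouter()
	// Normalize comma-separated query values, then reject unsupported request
	// bodies and response formats before the route handlers run.
	r.Use(middleware.NormalizeQueryValuesHandler)
	r.Use(middleware.ContentTypeHandler([]string{"application/json", "application/octet-stream"}))
	r.Use(middleware.AcceptHeaderHandler([]string{"application/json", "application/octet-stream"}))
	r.HandleFunc("/eth/v1/example", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// CORS wraps the whole router, as the gateway change above does.
	handler := middleware.CorsHandler([]string{"*"}).Middleware(r)
	_ = http.ListenAndServe(":8080", handler)
}
```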
api/server/middleware/middleware_test.go (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestContentTypeHandler(t *testing.T) {
|
||||
acceptedMediaTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}
|
||||
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := ContentTypeHandler(acceptedMediaTypes)(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
contentType string
|
||||
expectedStatusCode int
|
||||
isGet bool
|
||||
}{
|
||||
{
|
||||
name: "Accepted Content-Type - application/json",
|
||||
contentType: api.JsonMediaType,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Content-Type - ssz format",
|
||||
contentType: api.OctetStreamMediaType,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Unsupported Content-Type - text/plain",
|
||||
contentType: "text/plain",
|
||||
expectedStatusCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "Missing Content-Type",
|
||||
contentType: "",
|
||||
expectedStatusCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "GET request skips content type check",
|
||||
contentType: "",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
isGet: true,
|
||||
},
|
||||
{
|
||||
name: "Content type contains charset is ok",
|
||||
contentType: "application/json; charset=utf-8",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
httpMethod := http.MethodPost
|
||||
if tt.isGet {
|
||||
httpMethod = http.MethodGet
|
||||
}
|
||||
req := httptest.NewRequest(httpMethod, "/", nil)
|
||||
if tt.contentType != "" {
|
||||
req.Header.Set("Content-Type", tt.contentType)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != tt.expectedStatusCode {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptHeaderHandler(t *testing.T) {
|
||||
acceptedTypes := []string{"application/json", "application/octet-stream"}
|
||||
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := AcceptHeaderHandler(acceptedTypes)(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
acceptHeader string
|
||||
expectedStatusCode int
|
||||
}{
|
||||
{
|
||||
name: "Accepted Accept-Type - application/json",
|
||||
acceptHeader: "application/json",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Accept-Type - application/octet-stream",
|
||||
acceptHeader: "application/octet-stream",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Accept-Type with parameters",
|
||||
acceptHeader: "application/json;q=0.9, application/octet-stream;q=0.8",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Unsupported Accept-Type - text/plain",
|
||||
acceptHeader: "text/plain",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
{
|
||||
name: "Missing Accept header",
|
||||
acceptHeader: "",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "*/* is accepted",
|
||||
acceptHeader: "*/*",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "application/* is accepted",
|
||||
acceptHeader: "application/*",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "/* is unsupported",
|
||||
acceptHeader: "/*",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
{
|
||||
name: "application/ is unsupported",
|
||||
acceptHeader: "application/",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
if tt.acceptHeader != "" {
|
||||
req.Header.Set("Accept", tt.acceptHeader)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != tt.expectedStatusCode {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package server
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
@@ -1,4 +1,4 @@
|
||||
package server
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -1,54 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
|
||||
@@ -1,10 +1,34 @@
|
||||
package structs
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// MessageJsoner describes a signed consensus type wrapper that can return the `.Message` field in a json envelope
|
||||
// encoded as a []byte, for use as a json.RawMessage value when encoding the outer envelope.
|
||||
type MessageJsoner interface {
|
||||
MessageRawJson() ([]byte, error)
|
||||
}
|
||||
|
||||
// SignedMessageJsoner embeds MessageJsoner and adds a method to also retrieve the Signature field as a string.
|
||||
type SignedMessageJsoner interface {
|
||||
MessageJsoner
|
||||
SigString() string
|
||||
}
|
||||
|
||||
type SignedBeaconBlock struct {
|
||||
Message *BeaconBlock `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlock{}
|
||||
|
||||
func (s *SignedBeaconBlock) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlock) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
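As a hedged illustration of how these interfaces are meant to be consumed (a sketch, not part of the change): because MessageRawJson returns the inner message already encoded, a caller can rebuild the outer envelope for any signed block type with json.RawMessage, mirroring the struct tags above. The helper name below is hypothetical and assumes this package's existing encoding/json import.

// marshalSignedEnvelope is a hypothetical helper: it embeds the pre-encoded
// message as a json.RawMessage and copies the signature string alongside it,
// without knowing the concrete fork version of the block.
func marshalSignedEnvelope(j SignedMessageJsoner) ([]byte, error) {
	msg, err := j.MessageRawJson()
	if err != nil {
		return nil, err
	}
	return json.Marshal(struct {
		Message   json.RawMessage `json:"message"`
		Signature string          `json:"signature"`
	}{
		Message:   json.RawMessage(msg),
		Signature: j.SigString(),
	})
}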
||||
type BeaconBlock struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -29,6 +53,16 @@ type SignedBeaconBlockAltair struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockAltair{}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockAltair struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -54,6 +88,16 @@ type SignedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -80,6 +124,16 @@ type SignedBlindedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -106,6 +160,16 @@ type SignedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -133,6 +197,16 @@ type SignedBlindedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -172,6 +246,16 @@ type SignedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockDeneb struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -208,6 +292,16 @@ type SignedBlindedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyDeneb struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -15,6 +17,8 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var ErrUnsupportedConversion = errors.New("Could not determine api struct type to use for value")
|
||||
|
||||
func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
|
||||
msg, err := h.Message.ToConsensus()
|
||||
if err != nil {
|
||||
@@ -1852,7 +1856,34 @@ func BeaconBlockFromConsensus(b *eth.BeaconBlock) *BeaconBlock {
|
||||
}
|
||||
}
|
||||
|
||||
func SignedBeaconBlockFromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock) (SignedMessageJsoner, error) {
|
||||
pb, err := block.Proto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch pbStruct := pb.(type) {
|
||||
case *eth.SignedBeaconBlock:
|
||||
return SignedBeaconBlockPhase0FromConsensus(pbStruct), nil
|
||||
case *eth.SignedBeaconBlockAltair:
|
||||
return SignedBeaconBlockAltairFromConsensus(pbStruct), nil
|
||||
case *eth.SignedBlindedBeaconBlockBellatrix:
|
||||
return SignedBlindedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockBellatrix:
|
||||
return SignedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockCapella:
|
||||
return SignedBlindedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockCapella:
|
||||
return SignedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockDeneb:
|
||||
return SignedBlindedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockDeneb:
|
||||
return SignedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
default:
|
||||
return nil, ErrUnsupportedConversion
|
||||
}
|
||||
}
|
||||
|
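A brief, hypothetical call-site sketch for the constructor above: it accepts any fork version of a signed block, surfaces ErrUnsupportedConversion for versions without an API struct, and reuses the marshalSignedEnvelope idea sketched alongside the struct definitions (neither helper is part of this change).

// encodeSignedBlockResponse is illustrative only: convert the consensus block
// to its version-specific API struct, then wrap it in the generic
// {"message","signature"} envelope.
func encodeSignedBlockResponse(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	jsoner, err := SignedBeaconBlockMessageJsoner(blk)
	if err != nil {
		// ErrUnsupportedConversion is returned when no API struct exists for the block version.
		return nil, err
	}
	return marshalSignedEnvelope(jsoner)
}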
||||
func SignedBeaconBlockPhase0FromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
return &SignedBeaconBlock{
|
||||
Message: BeaconBlockFromConsensus(b.Block),
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
|
||||
@@ -137,7 +137,7 @@ go_test(
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -399,14 +398,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
|
||||
return s.cfg.ForkChoiceStore.HasNode(root)
|
||||
}
|
||||
|
||||
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
|
||||
// chain known to forkchoice
|
||||
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes the root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
@@ -518,13 +509,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
|
||||
return ar[:], nil
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
|
||||
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesis time of beacon chain.
|
||||
func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
|
||||
@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -325,7 +325,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
var attr payloadattribute.Attributer
|
||||
switch st.Version() {
|
||||
case version.Deneb:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
@@ -342,7 +342,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return emptyAttri
|
||||
}
|
||||
case version.Capella:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
|
||||
@@ -401,7 +401,7 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
|
||||
}
|
||||
for _, a := range orphanedBlk.Block().Body().Attestations() {
|
||||
// if the attestation is one epoch older, it wouldn't been useful to save it.
|
||||
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
continue
|
||||
}
|
||||
if helpers.IsAggregated(a) {
|
||||
|
||||
@@ -312,14 +312,14 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
@@ -389,14 +389,14 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
|
||||
@@ -517,14 +517,14 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
@@ -369,6 +369,6 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
|
||||
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
|
||||
for _, att := range blk.Body().Attestations() {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.GetData().Slot))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,17 +36,17 @@ import (
|
||||
//
|
||||
// # Update latest messages for attesting indices
|
||||
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
|
||||
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := ethpb.CopyCheckpoint(a.Data.Target)
|
||||
tgt := ethpb.CopyCheckpoint(a.GetData().Target)
|
||||
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
@@ -67,7 +67,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
if err := s.verifyBeaconBlock(ctx, a.GetData()); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
@@ -75,16 +75,16 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
|
||||
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the target state to verify attesting indices are valid.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// We assume trusted attestation in this function has verified signature.
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,9 +7,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
@@ -73,7 +75,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
a ethpb.Att
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
@@ -125,25 +127,36 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||
}
|
||||
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
eval(ctx, service, genesisState, pks)
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
|
||||
eval(ctx, service, genesisState, pks)
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState(t *testing.T) {
|
||||
|
||||
@@ -366,17 +366,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
|
||||
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||
// Feed in block's attestations to fork choice store.
|
||||
for _, a := range blk.Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, st, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(a, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
if s.cfg.ForkChoiceStore.HasNode(r) {
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
|
||||
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -387,7 +387,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
|
||||
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||
// This function requires a write lock on forkchoice.
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []ethpb.AttSlashing) {
|
||||
for _, slashing := range slashings {
|
||||
indices := blocks.SlashableAttesterIndices(slashing)
|
||||
for _, index := range indices {
|
||||
|
||||
@@ -824,7 +824,10 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
|
||||
|
||||
atts := b.Block.Body.Attestations
|
||||
atts := make([]ethpb.Att, len(b.Block.Body.Attestations))
|
||||
for i, a := range b.Block.Body.Attestations {
|
||||
atts[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -1960,68 +1963,130 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisStateElectra(t, 64)
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis, err := blocks.NewGenesisBlockForState(ctx, st)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block().HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
|
||||
@@ -31,7 +31,7 @@ type AttestationStateFetcher interface {
|
||||
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
|
||||
type AttestationReceiver interface {
|
||||
AttestationStateFetcher
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att ethpb.Att) error
|
||||
InForkchoice([32]byte) bool
|
||||
}
|
||||
|
||||
@@ -51,13 +51,13 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(a.Data.Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
|
||||
if !bytes.Equal(a.GetData().Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.GetData().BeaconBlockRoot, a.GetData().Target.Root, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -170,13 +170,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// Based on the spec, don't process the attestation until the subsequent slot.
|
||||
// This delays consideration in the fork choice until their slot is in the past.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
|
||||
nextSlot := a.Data.Slot + 1
|
||||
nextSlot := a.GetData().Slot + 1
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
if !(hasState && hasBlock) {
|
||||
continue
|
||||
}
|
||||
@@ -185,17 +185,17 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
|
||||
if !helpers.VerifyCheckpointEpoch(a.GetData().Target, s.genesisTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.Data.Slot,
|
||||
"committeeIndex": a.Data.CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
|
||||
"aggregationCount": a.AggregationBits.Count(),
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregationCount": a.GetAggregationBits().Count(),
|
||||
}).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
}
|
||||
}
|
||||
@@ -206,7 +206,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// 1. Validate attestation, update validator's latest vote
|
||||
// 2. Apply fork choice to the processed attestation
|
||||
// 3. Save latest head info
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att ethpb.Att, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -83,7 +83,11 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]ethpb.Att, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
service.processAttestations(ctx, 0)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
|
||||
@@ -121,10 +125,14 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
// Generate attestations for this block in Slot 1
|
||||
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]ethpb.Att, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
// Verify the target is in forkchoice
|
||||
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
|
||||
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
|
||||
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
|
||||
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
|
||||
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
|
||||
|
||||
// Insert a new block to forkchoice
|
||||
|
||||
@@ -52,7 +52,7 @@ type BlobReceiver interface {
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations (minus pubsub)
|
||||
@@ -295,10 +295,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []ethpb.AttSlashing{slashing})
|
||||
}
|
||||
|
||||
// prunePostBlockOperationPools only runs on new head otherwise should return a nil.
|
||||
@@ -479,12 +479,12 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
ctx := context.TODO()
|
||||
for _, att := range signed.Block().Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, preState, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committee")
|
||||
log.WithError(err).Error("Could not get attestation committees")
|
||||
return
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to indexed attestation")
|
||||
return
|
||||
@@ -497,7 +497,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
|
||||
if err != nil {
|
||||
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
if signed.Version() < version.Capella && isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
@@ -80,7 +80,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositcache.New()
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
fc := doublylinkedtree.New()
|
||||
|
||||
@@ -8,11 +8,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
@@ -49,7 +50,7 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
@@ -79,7 +80,7 @@ type testServiceRequirements struct {
|
||||
attPool attestations.Pool
|
||||
attSrv *attestations.Service
|
||||
blsPool *blstoexec.Pool
|
||||
dc *depositcache.DepositCache
|
||||
dc *depositsnapshot.Cache
|
||||
}
|
||||
|
||||
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
|
||||
@@ -94,7 +95,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
|
||||
require.NoError(t, err)
|
||||
blsPool := blstoexec.NewPool()
|
||||
dc, err := depositcache.New()
|
||||
dc, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
req := &testServiceRequirements{
|
||||
ctx: ctx,
|
||||
@@ -120,6 +121,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -414,8 +414,8 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a ethpb.Att) error {
|
||||
if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
return nil
|
||||
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing mocks the same method in the chain service.
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, ethpb.AttSlashing) {}
|
||||
|
||||
// IsFinalized mocks the same method in the chain service.
|
||||
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
|
||||
|
||||
@@ -2,7 +2,6 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/builder"
|
||||
@@ -55,13 +54,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Capella:
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Deneb:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
}
|
||||
|
||||
3
beacon-chain/cache/BUILD.bazel
vendored
@@ -6,6 +6,7 @@ go_library(
|
||||
"active_balance.go",
|
||||
"active_balance_disabled.go", # keep
|
||||
"attestation_data.go",
|
||||
"balance_cache_key.go",
|
||||
"checkpoint_state.go",
|
||||
"committee.go",
|
||||
"committee_disabled.go", # keep
|
||||
@@ -70,6 +71,7 @@ go_test(
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"payload_id_test.go",
|
||||
"private_access_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"registration_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
@@ -93,6 +95,7 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
|
||||
37
beacon-chain/cache/active_balance.go
vendored
@@ -3,17 +3,13 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -86,36 +82,3 @@ func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
|
||||
balanceCacheHit.Inc()
|
||||
return value.(uint64), nil
|
||||
}
|
||||
|
||||
// Given input state `st`, balance key is constructed as:
|
||||
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
|
||||
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
|
||||
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
|
||||
if err != nil {
|
||||
// impossible condition due to early division
|
||||
return "", errors.Errorf("start slot calculation overflows: %v", err)
|
||||
}
|
||||
prevSlot := primitives.Slot(0)
|
||||
if epochStartSlot > 1 {
|
||||
prevSlot = epochStartSlot - 1
|
||||
}
|
||||
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
if err != nil {
|
||||
// impossible condition because index is always constrained within state
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Mix in current epoch
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
|
||||
key := append(r, b...)
|
||||
|
||||
// Mix in validator count
|
||||
b = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
|
||||
key = append(key, b...)
|
||||
|
||||
return string(key), nil
|
||||
}
|
||||
|
||||
31
beacon-chain/cache/active_balance_test.go
vendored
@@ -1,12 +1,13 @@
|
||||
//go:build !fuzz
|
||||
|
||||
package cache
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -27,33 +28,33 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoPhase0(raw)
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := NewEffectiveBalanceCache()
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
cc := cache.NewEffectiveBalanceCache()
|
||||
_, err = cc.Get(st)
|
||||
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
|
||||
|
||||
b := uint64(100)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err := cache.Get(st)
|
||||
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err := cc.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
|
||||
require.NoError(t, st.SetSlot(1000))
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
_, err = cc.Get(st)
|
||||
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
|
||||
|
||||
b = uint64(200)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cache.Get(st)
|
||||
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cc.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
|
||||
require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
_, err = cc.Get(st)
|
||||
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
|
||||
|
||||
b = uint64(300)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cache.Get(st)
|
||||
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cc.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
}
|
||||
@@ -72,6 +73,6 @@ func TestBalanceCache_BalanceKey(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(math.MaxUint64)))
|
||||
|
||||
_, err = balanceCacheKey(st)
|
||||
_, err = cache.BalanceCacheKey(st)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
43
beacon-chain/cache/balance_cache_key.go
vendored
Normal file
@@ -0,0 +1,43 @@
package cache

import (
	"encoding/binary"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
	epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
	if err != nil {
		// impossible condition due to early division
		return "", fmt.Errorf("start slot calculation overflows: %w", err)
	}
	prevSlot := primitives.Slot(0)
	if epochStartSlot > 1 {
		prevSlot = epochStartSlot - 1
	}
	r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
	if err != nil {
		// impossible condition because index is always constrained within state
		return "", err
	}

	// Mix in current epoch
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
	key := append(r, b...)

	// Mix in validator count
	b = make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
	key = append(key, b...)

	return string(key), nil
}
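For reference, the key built above is a plain byte concatenation: the block root (32 bytes in the beacon state), followed by the current epoch and the validator count as little-endian uint64s, 48 bytes in total. The following standalone sketch (stdlib only, with made-up input values) illustrates the same layout without the beacon state types:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// buildKey mirrors the layout used by balanceCacheKey:
// blockRoot || littleEndian(epoch) || littleEndian(validatorCount).
func buildKey(blockRoot [32]byte, epoch, validatorCount uint64) string {
	key := make([]byte, 0, 32+8+8)
	key = append(key, blockRoot[:]...)

	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, epoch)
	key = append(key, b...)

	binary.LittleEndian.PutUint64(b, validatorCount)
	key = append(key, b...)

	return string(key)
}

func main() {
	var root [32]byte
	copy(root[:], "example-block-root") // hypothetical root, for illustration only
	key := buildKey(root, 10, 16384)
	fmt.Println("key length:", len(key)) // 48
	fmt.Println("key bytes :", hex.EncodeToString([]byte(key)))
}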
11
beacon-chain/cache/checkpoint_state_test.go
vendored
@@ -1,8 +1,9 @@
|
||||
package cache
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
cache := NewCheckpointStateCache()
|
||||
cache := cache.NewCheckpointStateCache()
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -58,16 +59,16 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_MaxSize(t *testing.T) {
|
||||
c := NewCheckpointStateCache()
|
||||
c := cache.NewCheckpointStateCache()
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 0,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
|
||||
for i := uint64(0); i < uint64(cache.MaxCheckpointStateSize()+100); i++ {
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(i)))
|
||||
require.NoError(t, c.AddCheckpointState(ðpb.Checkpoint{Epoch: primitives.Epoch(i), Root: make([]byte, 32)}, st))
|
||||
}
|
||||
|
||||
assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
|
||||
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
|
||||
}
|
||||
|
||||
50
beacon-chain/cache/depositcache/BUILD.bazel
vendored
@@ -1,50 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"deposits_cache.go",
|
||||
"log.go",
|
||||
"pending_deposits.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"deposits_cache_test.go",
|
||||
"pending_deposits_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
327
beacon-chain/cache/depositcache/deposits_cache.go
vendored
@@ -1,327 +0,0 @@
|
||||
// Package depositcache is the source of validator deposits maintained
|
||||
// in-memory by the beacon node – deposits processed from the
|
||||
// eth1 powchain are then stored in this cache to be accessed by
|
||||
// any other service during a beacon node's runtime.
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacondb_all_deposits",
|
||||
Help: "The number of total deposits in the beaconDB in-memory database",
|
||||
})
|
||||
)
|
||||
|
||||
// FinalizedDeposits stores the trie of deposits that have been included
|
||||
// in the beacon state up to the latest finalized checkpoint.
|
||||
type FinalizedDeposits struct {
|
||||
deposits *trie.SparseMerkleTrie
|
||||
merkleTrieIndex int64
|
||||
}
|
||||
|
||||
// DepositCache stores all in-memory deposit objects. This
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type DepositCache struct {
|
||||
// Beacon chain deposits in memory.
|
||||
pendingDeposits []*ethpb.DepositContainer
|
||||
deposits []*ethpb.DepositContainer
|
||||
finalizedDeposits FinalizedDeposits
|
||||
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
|
||||
depositsLock sync.RWMutex
|
||||
}
|
||||
|
||||
// New instantiates a new deposit cache
|
||||
func New() (*DepositCache, error) {
|
||||
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
|
||||
// Inserting the first item into the trie will set the value of the index to 0.
|
||||
return &DepositCache{
|
||||
pendingDeposits: []*ethpb.DepositContainer{},
|
||||
deposits: []*ethpb.DepositContainer{},
|
||||
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
|
||||
finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
"index": index,
|
||||
"depositRoot": hex.EncodeToString(depositRoot[:]),
|
||||
}).Warn("Ignoring nil deposit insertion")
|
||||
return errors.New("nil deposit inserted into the cache")
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
if int(index) != len(dc.deposits) {
|
||||
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
|
||||
}
|
||||
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
|
||||
depCtr := ðpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
|
||||
newDeposits := append(
|
||||
[]*ethpb.DepositContainer{depCtr},
|
||||
dc.deposits[heightIdx:]...)
|
||||
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
|
||||
// Append the deposit to our map, in the event no deposits
|
||||
// exist for the pubkey , it is simply added to the map.
|
||||
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
|
||||
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
|
||||
historicalDepositsCount.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
|
||||
dc.deposits = ctrs
|
||||
for _, c := range ctrs {
|
||||
// Use a new value, as the reference
|
||||
// of c changes in the next iteration.
|
||||
newPtr := c
|
||||
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
|
||||
dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
|
||||
}
|
||||
historicalDepositsCount.Add(float64(len(ctrs)))
|
||||
}
|
||||
|
||||
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
|
||||
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
|
||||
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
depositTrie := dc.finalizedDeposits.Deposits()
|
||||
insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)
|
||||
|
||||
// Don't insert into finalized trie if there is no deposit to
|
||||
// insert.
|
||||
if len(dc.deposits) == 0 {
|
||||
return nil
|
||||
}
|
||||
// In the event we have less deposits than we need to
|
||||
// finalize we finalize till the index on which we do have it.
|
||||
if len(dc.deposits) <= int(eth1DepositIndex) {
|
||||
eth1DepositIndex = int64(len(dc.deposits)) - 1
|
||||
}
|
||||
// If we finalize to some lower deposit index, we
|
||||
// ignore it.
|
||||
if int(eth1DepositIndex) < insertIndex {
|
||||
return nil
|
||||
}
|
||||
for _, d := range dc.deposits {
|
||||
if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
|
||||
continue
|
||||
}
|
||||
if d.Index > eth1DepositIndex {
|
||||
break
|
||||
}
|
||||
depHash, err := d.Deposit.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash deposit data")
|
||||
}
|
||||
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
|
||||
return errors.Wrap(err, "could not insert deposit hash")
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
tree, ok := depositTrie.(*trie.SparseMerkleTrie)
|
||||
if !ok {
|
||||
return errors.New("not a sparse merkle tree")
|
||||
}
|
||||
dc.finalizedDeposits = FinalizedDeposits{
|
||||
deposits: tree,
|
||||
merkleTrieIndex: eth1DepositIndex,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AllDepositContainers returns all historical deposit containers.
|
||||
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
// Make a shallow copy of the deposits and return that. This way, the
|
||||
// caller can safely iterate over the returned list of deposits without
|
||||
// the possibility of new deposits showing up. If we were to return the
|
||||
// list without a copy, when a new deposit is added to the cache, it
|
||||
// would also be present in the returned value. This could result in a
|
||||
// race condition if the list is being iterated over.
|
||||
//
|
||||
// It's not necessary to make a deep copy of this list because the
|
||||
// deposits in the cache should never be modified. It is still possible
|
||||
// for the caller to modify one of the underlying deposits and modify
|
||||
// the cache, but that's not a race condition. Also, a deep copy would
|
||||
// take too long and use too much memory.
|
||||
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
|
||||
copy(deposits, dc.deposits)
|
||||
return deposits
|
||||
}
|
||||
|
||||
// AllDeposits returns a list of historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
return dc.allDeposits(untilBlk)
|
||||
}
|
||||
|
||||
func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, ctnr := range dc.deposits {
|
||||
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
deposits = append(deposits, ctnr.Deposit)
|
||||
}
|
||||
}
|
||||
return deposits
|
||||
}
|
||||
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
|
||||
// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
|
||||
// deposit.
|
||||
if heightIdx == 0 {
|
||||
return 0, [32]byte{}
|
||||
}
|
||||
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
|
||||
}
|
||||
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var deposit *ethpb.Deposit
|
||||
var blockNum *big.Int
|
||||
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
|
||||
if !ok || len(deps) == 0 {
|
||||
return deposit, blockNum
|
||||
}
|
||||
// We always return the first deposit if a particular
|
||||
// validator key has multiple deposits assigned to
|
||||
// it.
|
||||
deposit = deps[0].Deposit
|
||||
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
|
||||
return deposit, blockNum
|
||||
}
|
||||
|
||||
// FinalizedDeposits returns the finalized deposits trie.
|
||||
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
return &FinalizedDeposits{
|
||||
deposits: dc.finalizedDeposits.deposits.Copy(),
|
||||
merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
|
||||
// If no block is specified then this method returns all non-finalized deposits.
|
||||
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
if dc.finalizedDeposits.Deposits() == nil {
|
||||
return dc.allDeposits(untilBlk)
|
||||
}
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, d := range dc.deposits {
|
||||
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
|
||||
deposits = append(deposits, d.Deposit)
|
||||
}
|
||||
}
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(dc.deposits)) {
|
||||
untilDepositIndex = int64(len(dc.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if dc.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
dc.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deposits returns the cached internal deposit tree.
|
||||
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
|
||||
if fd.deposits != nil {
|
||||
return fd.deposits
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MerkleTrieIndex represents the last finalized index in
|
||||
// the finalized deposit container.
|
||||
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
|
||||
return fd.merkleTrieIndex
|
||||
}
|
||||
1074
beacon-chain/cache/depositcache/deposits_cache_test.go
vendored
File diff suppressed because it is too large
5
beacon-chain/cache/depositcache/log.go
vendored
@@ -1,5 +0,0 @@
package depositcache

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "depositcache")
151
beacon-chain/cache/depositcache/pending_deposits.go
vendored
@@ -1,151 +0,0 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacondb_pending_deposits",
|
||||
Help: "The number of pending deposits in the beaconDB in-memory database",
|
||||
})
|
||||
)
|
||||
|
||||
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
|
||||
// which have not yet been included in the chain.
|
||||
type PendingDepositsFetcher interface {
|
||||
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
}).Debug("Ignoring nil deposit insertion")
|
||||
return
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
dc.pendingDeposits = append(dc.pendingDeposits,
|
||||
ðpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Inc()
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
|
||||
}
|
||||
|
||||
// PendingDeposits returns a list of deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all pending
|
||||
// deposits.
|
||||
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
depositCntrs := dc.PendingContainers(ctx, untilBlk)
|
||||
|
||||
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
|
||||
for _, dep := range depositCntrs {
|
||||
deposits = append(deposits, dep.Deposit)
|
||||
}
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
// Sort the deposits by Merkle index.
|
||||
sort.SliceStable(depositCntrs, func(i, j int) bool {
|
||||
return depositCntrs[i].Index < depositCntrs[j].Index
|
||||
})
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
|
||||
|
||||
return depositCntrs
|
||||
}
|
||||
|
||||
// RemovePendingDeposit from the database. The deposit is indexed by the
|
||||
// Index. This method does nothing if deposit ptr is nil.
|
||||
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
|
||||
defer span.End()
|
||||
|
||||
if d == nil {
|
||||
log.Debug("Ignoring nil deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
depRoot, err := hash.Proto(d)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not remove deposit")
|
||||
return
|
||||
}
|
||||
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
idx := -1
|
||||
for i, ctnr := range dc.pendingDeposits {
|
||||
h, err := hash.Proto(ctnr.Deposit)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not hash deposit")
|
||||
continue
|
||||
}
|
||||
if h == depRoot {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if idx >= 0 {
|
||||
dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
|
||||
pendingDepositsCount.Dec()
|
||||
}
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
|
||||
for _, dp := range dc.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
dc.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
|
||||
}
|
||||
@@ -34,6 +34,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"deposit_cache_test.go",
|
||||
"deposit_fetcher_test.go",
|
||||
"deposit_tree_snapshot_test.go",
|
||||
"merkle_tree_test.go",
|
||||
"spec_test.go",
|
||||
|
||||
@@ -262,6 +262,12 @@ func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedD
|
||||
}
|
||||
}
|
||||
|
||||
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
|
||||
// which have not yet been included in the chain.
|
||||
type PendingDepositsFetcher interface {
|
||||
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
|
||||
}
|
||||
|
||||
// PendingDeposits returns a list of deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all pending
|
||||
// deposits.
|
||||
|
||||
@@ -1,82 +1,32 @@
|
||||
package depositcache
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var _ PendingDepositsFetcher = (*DepositCache)(nil)
|
||||
var _ PendingDepositsFetcher = (*Cache)(nil)
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc := Cache{}
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
|
||||
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
|
||||
}
|
||||
|
||||
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc := Cache{}
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
|
||||
assert.Equal(t, 0, len(dc.pendingDeposits))
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
proof1 := makeDepositProof()
|
||||
proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
|
||||
proof2 := makeDepositProof()
|
||||
proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
|
||||
data := ðpb.Deposit_Data{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 0,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
depToRemove := ðpb.Deposit{Proof: proof1, Data: data}
|
||||
otherDep := ðpb.Deposit{Proof: proof2, Data: data}
|
||||
db.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Deposit: depToRemove, Index: 1},
|
||||
{Deposit: otherDep, Index: 5},
|
||||
}
|
||||
db.RemovePendingDeposit(context.Background(), depToRemove)
|
||||
|
||||
if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
|
||||
t.Error("Failed to remove deposit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
|
||||
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
|
||||
}
|
||||
|
||||
func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
proof := makeDepositProof()
|
||||
proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
|
||||
data := ðpb.Deposit_Data{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 0,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
dep := ðpb.Deposit{Proof: proof, Data: data}
|
||||
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
|
||||
dc.RemovePendingDeposit(context.Background(), dep)
|
||||
assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
|
||||
}
|
||||
|
||||
func TestPendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
@@ -96,7 +46,7 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
@@ -120,7 +70,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
@@ -99,11 +99,23 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
|
||||
if d.depositCount <= 0 {
|
||||
return [32]byte{}, nil, ErrInvalidDepositCount
|
||||
}
|
||||
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
|
||||
if finalizedDeposits != 0 {
|
||||
finalizedDeposits = finalizedDeposits - 1
|
||||
if index >= d.depositCount {
|
||||
return [32]byte{}, nil, ErrInvalidIndex
|
||||
}
|
||||
if index <= finalizedDeposits {
|
||||
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
|
||||
finalizedIdx := -1
|
||||
if finalizedDeposits != 0 {
|
||||
fd, err := math.Int(finalizedDeposits)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
finalizedIdx = fd - 1
|
||||
}
|
||||
i, err := math.Int(index)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
if finalizedDeposits > 0 && i <= finalizedIdx {
|
||||
return [32]byte{}, nil, ErrInvalidIndex
|
||||
}
|
||||
leaf, proof := generateProof(d.tree, index, DepositContractDepth)
|
||||
|
||||
18
beacon-chain/cache/private_access_test.go
vendored
Normal file
@@ -0,0 +1,18 @@
package cache

import (
	lru "github.com/hashicorp/golang-lru"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)

func BalanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
	return balanceCacheKey(st)
}

func MaxCheckpointStateSize() int {
	return maxCheckpointStateSize
}

func (c *CheckpointStateCache) Cache() *lru.Cache {
	return c.cache
}
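This new file follows the common Go "export for test" pattern: it lives in package cache but is compiled only during tests, re-exporting unexported identifiers so the black-box cache_test package used in the rewritten tests above can reach them. A minimal sketch of the pattern with hypothetical names (not taken from this repository):

// widget/widget.go
package widget

func secretDefault() int { return 42 } // unexported, invisible to widget_test

// widget/export_test.go
// Compiled only by "go test", so the exported alias never ships in the library.
package widget

func SecretDefault() int { return secretDefault() }

// widget/widget_test.go
package widget_test

import (
	"testing"

	"example.com/widget" // hypothetical module path
)

func TestSecretDefault(t *testing.T) {
	if got := widget.SecretDefault(); got != 42 {
		t.Fatalf("unexpected default: %d", got)
	}
}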
@@ -1,8 +1,9 @@
|
||||
package cache
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -125,7 +126,7 @@ func TestSyncCommitteeHeadState(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := NewSyncCommitteeHeadState()
|
||||
c := cache.NewSyncCommitteeHeadState()
|
||||
if tt.put != nil {
|
||||
err := c.Put(tt.put.slot, tt.put.state)
|
||||
if (err != nil) != tt.wantPutErr {
|
||||
|
||||
@@ -48,7 +48,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att ethpb.Att,
|
||||
totalBalance uint64,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
|
||||
@@ -58,24 +58,24 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
|
||||
delay, err := beaconState.Slot().SafeSubSlot(att.GetData().Slot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.Data.Slot, beaconState.Slot())
|
||||
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.GetData().Slot, beaconState.Slot())
|
||||
}
|
||||
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.Data, delay)
|
||||
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.GetData(), delay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committees...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.Data.Target.Epoch, indices, participatedFlags, totalBalance)
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
|
||||
}
|
||||
|
||||
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
|
||||
|
||||
@@ -195,47 +195,95 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_OK(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
})
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
committeeBits := primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
})
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
block := util.NewBeaconBlockElectra()
|
||||
block.Block.Body.Attestations = []*ethpb.AttestationElectra{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
@@ -273,7 +321,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
|
||||
@@ -28,87 +28,87 @@ import (
|
||||
// process_historical_roots_update(state)
|
||||
// process_participation_flag_updates(state) # [New in Altair]
|
||||
// process_sync_committee_updates(state) # [New in Altair]
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
return errors.New("nil state")
|
||||
}
|
||||
vp, bp, err := InitializePrecomputeValidators(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process justification")
|
||||
return errors.Wrap(err, "could not process justification")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, vp, err = ProcessInactivityScores(ctx, state, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process inactivity updates")
|
||||
return errors.Wrap(err, "could not process inactivity updates")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process rewards and penalties")
|
||||
return errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
state, err = e.ProcessRegistryUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process registry updates")
|
||||
return errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
// Modified in Altair and Bellatrix.
|
||||
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessEffectiveBalanceUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessRandaoMixesReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessHistoricalDataUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
_, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
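The hunk above changes altair.ProcessEpoch from returning (state.BeaconState, error) to returning only an error: the passed-in state is mutated in place, so callers stop rethreading a newState value, as the test hunks below show. A rough standalone illustration of this refactor shape, with toy types rather than the Prysm API:

package main

import (
	"errors"
	"fmt"
)

type toyState struct{ slashings []uint64 }

// Before: each step returned a state reference that callers had to rethread.
func processEpochOld(st *toyState) (*toyState, error) {
	if st == nil {
		return nil, errors.New("nil state")
	}
	st.slashings = append(st.slashings, 0)
	return st, nil
}

// After: the state is mutated in place, so only an error is returned.
func processEpochNew(st *toyState) error {
	if st == nil {
		return errors.New("nil state")
	}
	st.slashings = append(st.slashings, 0)
	return nil
}

func main() {
	st := &toyState{}
	if _, err := processEpochOld(st); err != nil { // old call shape
		panic(err)
	}
	if err := processEpochNew(st); err != nil { // new call shape
		panic(err)
	}
	fmt.Println("slashings entries:", len(st.slashings))
}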
@@ -13,9 +13,9 @@ import (
|
||||
func TestProcessEpoch_CanProcess(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
newState, err := altair.ProcessEpoch(context.Background(), st)
|
||||
err := altair.ProcessEpoch(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
b := st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
|
||||
@@ -45,9 +45,9 @@ func TestProcessEpoch_CanProcess(t *testing.T) {
|
||||
func TestProcessEpoch_CanProcessBellatrix(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
newState, err := altair.ProcessEpoch(context.Background(), st)
|
||||
err := altair.ProcessEpoch(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
b := st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
|
||||
|
||||
@@ -154,11 +154,11 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0], committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
has, err := altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex)
|
||||
|
||||
@@ -46,7 +46,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
func VerifyAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att ethpb.Att,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -56,7 +56,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
}
|
||||
currEpoch := time.CurrentEpoch(beaconState)
|
||||
prevEpoch := time.PrevEpoch(beaconState)
|
||||
data := att.Data
|
||||
data := att.GetData()
|
||||
if data.Target.Epoch != prevEpoch && data.Target.Epoch != currEpoch {
|
||||
return fmt.Errorf(
|
||||
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
|
||||
@@ -76,11 +76,11 @@ func VerifyAttestationNoVerifySignature(
|
||||
}
|
||||
}
|
||||
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := att.Data.Slot
|
||||
s := att.GetData().Slot
|
||||
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
|
||||
if !minInclusionCheck {
|
||||
return fmt.Errorf(
|
||||
@@ -102,27 +102,58 @@ func VerifyAttestationNoVerifySignature(
|
||||
)
|
||||
}
|
||||
}
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.GetData().Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c := helpers.SlotCommitteeCount(activeValidatorCount)
|
||||
if uint64(att.Data.CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err := helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
if att.Version() < version.Electra {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
|
||||
participantsCount := 0
|
||||
var err error
|
||||
for i, ci := range committeeIndices {
|
||||
if uint64(ci) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", ci, c)
|
||||
}
|
||||
committees[i], err = helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, primitives.CommitteeIndex(ci))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
participantsCount += len(committees[i])
|
||||
}
|
||||
if att.GetAggregationBits().Len() != uint64(participantsCount) {
|
||||
return fmt.Errorf("aggregation bits count %d is different than participant count %d", att.GetAggregationBits().Len(), participantsCount)
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
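For callers that prefer not to branch on the attestation version themselves, the same result can be reached with the version-aware committee lookup used further down in this change. A minimal sketch of that caller-side pattern, assuming helpers.AttestationCommittees and the variadic attestation.ConvertToIndexed shown elsewhere in this diff:

	// Sketch only: resolve the committees for either attestation version, then
	// convert to an indexed attestation and validate its attesting indices.
	committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
	if err != nil {
		return err
	}
	idxAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
	if err != nil {
		return err
	}
	return attestation.IsValidAttestationIndices(ctx, idxAtt)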
@@ -133,7 +164,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att ethpb.Att,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -143,15 +174,15 @@ func ProcessAttestationNoVerifySignature(
|
||||
}
|
||||
|
||||
currEpoch := time.CurrentEpoch(beaconState)
|
||||
data := att.Data
|
||||
s := att.Data.Slot
|
||||
data := att.GetData()
|
||||
s := att.GetData().Slot
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(ctx, beaconState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingAtt := ðpb.PendingAttestation{
|
||||
Data: data,
|
||||
AggregationBits: att.AggregationBits,
|
||||
AggregationBits: att.GetAggregationBits(),
|
||||
InclusionDelay: beaconState.Slot() - s,
|
||||
ProposerIndex: proposerIndex,
|
||||
}
|
||||
@@ -169,23 +200,6 @@ func ProcessAttestationNoVerifySignature(
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// VerifyAttestationSignature converts and attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att *ethpb.Attestation) error {
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
|
||||
}
|
||||
|
||||
// VerifyIndexedAttestation determines the validity of an indexed attestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
@@ -203,7 +217,7 @@ func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyB
|
||||
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
|
||||
// signing_root = compute_signing_root(indexed_attestation.data, domain)
|
||||
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
|
||||
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt *ethpb.IndexedAttestation) error {
|
||||
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt ethpb.IndexedAtt) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
|
||||
defer span.End()
|
||||
|
||||
@@ -212,14 +226,14 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
}
|
||||
domain, err := signing.Domain(
|
||||
beaconState.Fork(),
|
||||
indexedAtt.Data.Target.Epoch,
|
||||
indexedAtt.GetData().Target.Epoch,
|
||||
params.BeaconConfig().DomainBeaconAttester,
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
indices := indexedAtt.GetAttestingIndices()
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
|
||||
@@ -44,7 +44,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices1, err := attestation.AttestingIndices(att1.AggregationBits, committee)
|
||||
attestingIndices1, err := attestation.AttestingIndices(att1, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices1))
|
||||
for i, indice := range attestingIndices1 {
|
||||
@@ -66,7 +66,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices2, err := attestation.AttestingIndices(att2.AggregationBits, committee)
|
||||
attestingIndices2, err := attestation.AttestingIndices(att2, committee)
|
||||
require.NoError(t, err)
|
||||
sigs = make([]bls.Signature, len(attestingIndices2))
|
||||
for i, indice := range attestingIndices2 {
|
||||
@@ -221,6 +221,83 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
|
||||
require.ErrorContains(t, "committee index 100 >= committee count 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
var zeroSig [fieldparams.BLSSignatureLength]byte
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 100)
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(1, true)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("non-zero committee index", func(t *testing.T) {
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.NewBitlist(1),
|
||||
CommitteeBits: bitfield.NewBitvector64(),
|
||||
}
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
|
||||
})
|
||||
t.Run("index of committee too big", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(63, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "committee index 63 >= committee count 1", err)
|
||||
})
|
||||
t.Run("wrong aggregation bits count", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(123)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "aggregation bits count 123 is different than participant count 3", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
@@ -386,7 +463,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []*ethpb.Attestation
|
||||
var atts []ethpb.Att
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
@@ -402,7 +479,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
atts = []*ethpb.Attestation{}
|
||||
atts = []ethpb.Att{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
@@ -501,53 +578,109 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
}
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits1 := primitives.NewAttestationCommitteeBits()
|
||||
commBits1.SetBitAt(0, true)
|
||||
att1 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
CommitteeBits: commBits1,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits2 := primitives.NewAttestationCommitteeBits()
|
||||
commBits2.SetBitAt(1, true)
|
||||
att2 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
CommitteeBits: commBits2,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
}
|
||||
|
||||
func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
@@ -607,6 +740,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -7,13 +7,11 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -40,7 +38,7 @@ import (
|
||||
func ProcessAttesterSlashings(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashings []*ethpb.AttesterSlashing,
|
||||
slashings []ethpb.AttSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
var err error
|
||||
@@ -57,7 +55,7 @@ func ProcessAttesterSlashings(
|
||||
func ProcessAttesterSlashing(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashing *ethpb.AttesterSlashing,
|
||||
slashing ethpb.AttSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
|
||||
@@ -77,19 +75,7 @@ func ProcessAttesterSlashing(
|
||||
return nil, err
|
||||
}
|
||||
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
|
||||
cfg := params.BeaconConfig()
|
||||
var slashingQuotient uint64
|
||||
switch {
|
||||
case beaconState.Version() == version.Phase0:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
}
|
||||
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash validator index %d",
|
||||
validatorIndex)
|
||||
@@ -104,20 +90,20 @@ func ProcessAttesterSlashing(
|
||||
}
|
||||
|
||||
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing *ethpb.AttesterSlashing) error {
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing ethpb.AttSlashing) error {
|
||||
if slashing == nil {
|
||||
return errors.New("nil slashing")
|
||||
}
|
||||
if slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
|
||||
if slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
|
||||
return errors.New("nil attestation")
|
||||
}
|
||||
if slashing.Attestation_1.Data == nil || slashing.Attestation_2.Data == nil {
|
||||
if slashing.FirstAttestation().GetData() == nil || slashing.SecondAttestation().GetData() == nil {
|
||||
return errors.New("nil attestation data")
|
||||
}
|
||||
att1 := slashing.Attestation_1
|
||||
att2 := slashing.Attestation_2
|
||||
data1 := att1.Data
|
||||
data2 := att2.Data
|
||||
att1 := slashing.FirstAttestation()
|
||||
att2 := slashing.SecondAttestation()
|
||||
data1 := att1.GetData()
|
||||
data2 := att2.GetData()
|
||||
if !IsSlashableAttestationData(data1, data2) {
|
||||
return errors.New("attestations are not slashable")
|
||||
}
|
||||
@@ -157,11 +143,11 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
|
||||
}
|
||||
|
||||
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
|
||||
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
|
||||
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
|
||||
func SlashableAttesterIndices(slashing ethpb.AttSlashing) []uint64 {
|
||||
if slashing == nil || slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
|
||||
return nil
|
||||
}
|
||||
indices1 := slashing.Attestation_1.AttestingIndices
|
||||
indices2 := slashing.Attestation_2.AttestingIndices
|
||||
indices1 := slashing.FirstAttestation().GetAttestingIndices()
|
||||
indices2 := slashing.SecondAttestation().GetAttestingIndices()
|
||||
return slice.IntersectionUint64(indices1, indices2)
|
||||
}
|
||||
|
||||
@@ -57,7 +57,11 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
assert.ErrorContains(t, "attestations are not slashable", err)
|
||||
}
|
||||
|
||||
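The same lifting loop from []*ethpb.AttesterSlashing to the new []ethpb.AttSlashing parameter repeats in several of the tests below. A hypothetical test helper (not part of this change) could express it once:

	// toAttSlashings lifts concrete attester slashings into the AttSlashing interface
	// slice expected by ProcessAttesterSlashings; the tests in this diff do it inline.
	func toAttSlashings(in []*ethpb.AttesterSlashing) []ethpb.AttSlashing {
		out := make([]ethpb.AttSlashing, len(in))
		for i, s := range in {
			out[i] = s
		}
		return out
	}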
@@ -92,7 +96,11 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
|
||||
}
|
||||
|
||||
@@ -144,7 +152,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -213,7 +225,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -282,7 +298,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -351,7 +371,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
|
||||
@@ -216,7 +216,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []*ethpb.AttesterSlashing{a}, v.SlashValidator)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
|
||||
}
|
||||
@@ -297,21 +297,6 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
attestation := ðpb.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttestationSignature(ctx, s, attestation)
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
|
||||
@@ -91,7 +91,11 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
if !newRegistry[expectedSlashedVal].Slashed {
|
||||
|
||||
@@ -135,8 +135,6 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
undo := util.HackDenebMaxuint(t)
|
||||
defer undo()
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
|
||||
@@ -163,7 +163,42 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateElectra:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
@@ -610,7 +609,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
payload.PrevRandao = random
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, wrapped)
|
||||
require.NoError(t, err)
|
||||
@@ -874,7 +873,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
}, big.NewInt(0))
|
||||
})
|
||||
}
|
||||
|
||||
func emptyPayload() *enginev1.ExecutionPayload {
|
||||
|
||||
@@ -12,12 +12,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type slashValidatorFunc func(ctx context.Context, st state.BeaconState, vid primitives.ValidatorIndex, penaltyQuotient, proposerRewardQuotient uint64) (state.BeaconState, error)
|
||||
type slashValidatorFunc func(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
vid primitives.ValidatorIndex) (state.BeaconState, error)
|
||||
|
||||
// ProcessProposerSlashings is one of the operations performed
|
||||
// on each processed beacon block to slash proposers based on
|
||||
@@ -75,19 +77,7 @@ func ProcessProposerSlashing(
|
||||
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify proposer slashing")
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
var slashingQuotient uint64
|
||||
switch {
|
||||
case beaconState.Version() == version.Phase0:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
}
|
||||
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
|
||||
}
|
||||
|
||||
@@ -179,7 +179,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
|
||||
func createAttestationSignatureBatch(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
atts []*ethpb.Attestation,
|
||||
atts []ethpb.Att,
|
||||
domain []byte,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
@@ -191,19 +191,19 @@ func createAttestationSignatureBatch(
|
||||
msgs := make([][32]byte, len(atts))
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.Signature
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
sigs[i] = a.GetSignature()
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, c)
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, committees...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := attestation.IsValidAttestationIndices(ctx, ia); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices := ia.AttestingIndices
|
||||
indices := ia.GetAttestingIndices()
|
||||
pubkeys := make([][]byte, len(indices))
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
@@ -215,7 +215,7 @@ func createAttestationSignatureBatch(
|
||||
}
|
||||
pks[i] = aggP
|
||||
|
||||
root, err := signing.ComputeSigningRoot(ia.Data, domain)
|
||||
root, err := signing.ComputeSigningRoot(ia.GetData(), domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get signing root of object")
|
||||
}
|
||||
@@ -233,7 +233,7 @@ func createAttestationSignatureBatch(
|
||||
|
||||
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
|
||||
// signatures and attestation signing data and collate it into a signature batch object.
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []ethpb.Att) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
return bls.NewSet(), nil
|
||||
}
|
||||
@@ -243,10 +243,10 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
|
||||
dt := params.BeaconConfig().DomainBeaconAttester
|
||||
|
||||
// Split attestations by fork. Note: the signature domain will differ based on the fork.
|
||||
var preForkAtts []*ethpb.Attestation
|
||||
var postForkAtts []*ethpb.Attestation
|
||||
var preForkAtts []ethpb.Att
|
||||
var postForkAtts []ethpb.Att
|
||||
for _, a := range atts {
|
||||
if slots.ToEpoch(a.Data.Slot) < fork.Epoch {
|
||||
if slots.ToEpoch(a.GetData().Slot) < fork.Epoch {
|
||||
preForkAtts = append(preForkAtts, a)
|
||||
} else {
|
||||
postForkAtts = append(postForkAtts, a)
|
||||
|
||||
@@ -145,7 +145,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
expectedWithdrawals, err := st.ExpectedWithdrawals()
|
||||
expectedWithdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
@@ -250,12 +249,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
|
||||
type args struct {
|
||||
Name string
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
Name string
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
}
|
||||
type control struct {
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
@@ -283,7 +282,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
Amount: withdrawalAmount(i),
|
||||
}
|
||||
}
|
||||
partialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
@@ -321,12 +320,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 21),
|
||||
PendingPartialWithdrawal(7, 21),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -386,12 +385,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
|
||||
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -406,14 +405,14 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
},
|
||||
},
|
||||
@@ -453,17 +452,17 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success more than max partially withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Name: "success more than max partially withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
|
||||
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
|
||||
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
|
||||
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
|
||||
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
|
||||
partialWithdrawal(27, 37),
|
||||
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
|
||||
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
|
||||
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
|
||||
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
|
||||
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
|
||||
PendingPartialWithdrawal(27, 37),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -491,12 +490,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "failure wrong number of partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 37,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Name: "failure wrong number of partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 37,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
|
||||
PendingPartialWithdrawal(7, 21), PendingPartialWithdrawal(9, 22),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -540,7 +539,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
|
||||
fullWithdrawal(1, 25),
|
||||
},
|
||||
},
|
||||
@@ -564,10 +563,10 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "failure validator not partially withdrawable",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
|
||||
Name: "failure validator not partially withdrawable",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(notPartiallyWithdrawable, 22),
|
||||
},
|
||||
@@ -611,7 +610,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
}
|
||||
for _, idx := range arguments.PartialWithdrawalIndices {
|
||||
for _, idx := range arguments.PendingPartialWithdrawalIndices {
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
}
|
||||
@@ -629,8 +628,8 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
if test.Args.FullWithdrawalIndices == nil {
|
||||
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
if test.Args.PartialWithdrawalIndices == nil {
|
||||
test.Args.PartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
if test.Args.PendingPartialWithdrawalIndices == nil {
|
||||
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
slot, err := slots.EpochStart(currentEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -643,10 +642,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
|
||||
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
|
||||
big.NewInt(0),
|
||||
)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]})
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
@@ -673,12 +669,12 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
|
||||
type args struct {
|
||||
Name string
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
Name string
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
}
|
||||
type control struct {
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
@@ -706,7 +702,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
Amount: withdrawalAmount(i),
|
||||
}
|
||||
}
|
||||
partialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
@@ -744,12 +740,12 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 21),
|
||||
PendingPartialWithdrawal(7, 21),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -809,12 +805,12 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
|
||||
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -829,14 +825,14 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
},
|
||||
},
|
||||
@@ -876,17 +872,17 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success more than max partially withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Name: "success more than max partially withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
|
||||
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
|
||||
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
|
||||
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
|
||||
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
|
||||
partialWithdrawal(27, 37),
|
||||
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
|
||||
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
|
||||
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
|
||||
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
|
||||
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
|
||||
PendingPartialWithdrawal(27, 37),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -914,12 +910,12 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "failure wrong number of partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 37,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Name: "failure wrong number of partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 37,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
|
||||
PendingPartialWithdrawal(7, 21), PendingPartialWithdrawal(9, 22),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
@@ -963,7 +959,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
|
||||
fullWithdrawal(1, 25),
|
||||
},
|
||||
},
|
||||
@@ -987,10 +983,10 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "failure validator not partially withdrawable",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
|
||||
Name: "failure validator not partially withdrawable",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(notPartiallyWithdrawable, 22),
|
||||
},
|
||||
@@ -1034,7 +1030,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
}
|
||||
for _, idx := range arguments.PartialWithdrawalIndices {
|
||||
for _, idx := range arguments.PendingPartialWithdrawalIndices {
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
}
|
||||
@@ -1052,8 +1048,8 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
if test.Args.FullWithdrawalIndices == nil {
|
||||
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
if test.Args.PartialWithdrawalIndices == nil {
|
||||
test.Args.PartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
if test.Args.PendingPartialWithdrawalIndices == nil {
|
||||
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
slot, err := slots.EpochStart(currentEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -1064,7 +1060,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
st, err := prepareValidators(spb, test.Args)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
|
||||
79 beacon-chain/core/electra/BUILD.bazel Normal file
@@ -0,0 +1,79 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"churn.go",
|
||||
"consolidations.go",
|
||||
"deposits.go",
|
||||
"effective_balance_updates.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"churn_test.go",
|
||||
"consolidations_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/core/electra/attestation.go (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
package electra
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
|
||||
var (
|
||||
ProcessAttestationsNoVerifySignature = altair.ProcessAttestationsNoVerifySignature
|
||||
)
|
||||
beacon-chain/core/electra/churn.go (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// ComputeConsolidationEpochAndUpdateChurn fulfills the consensus spec definition below. This method
|
||||
// calls mutating methods to the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidation_balance: Gwei) -> Epoch:
|
||||
// earliest_consolidation_epoch = max(
|
||||
// state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
|
||||
// per_epoch_consolidation_churn = get_consolidation_churn_limit(state)
|
||||
// # New epoch for consolidations.
|
||||
// if state.earliest_consolidation_epoch < earliest_consolidation_epoch:
|
||||
// consolidation_balance_to_consume = per_epoch_consolidation_churn
|
||||
// else:
|
||||
// consolidation_balance_to_consume = state.consolidation_balance_to_consume
|
||||
//
|
||||
// # Consolidation doesn't fit in the current earliest epoch.
|
||||
// if consolidation_balance > consolidation_balance_to_consume:
|
||||
// balance_to_process = consolidation_balance - consolidation_balance_to_consume
|
||||
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
|
||||
// earliest_consolidation_epoch += additional_epochs
|
||||
// consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn
|
||||
//
|
||||
// # Consume the balance and update state variables.
|
||||
// state.consolidation_balance_to_consume = consolidation_balance_to_consume - consolidation_balance
|
||||
// state.earliest_consolidation_epoch = earliest_consolidation_epoch
|
||||
//
|
||||
// return state.earliest_consolidation_epoch
|
||||
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
|
||||
earliestEpoch, err := s.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(s.Slot())))
|
||||
activeBal, err := helpers.TotalActiveBalance(s)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
|
||||
|
||||
// New epoch for consolidations.
|
||||
var consolidationBalanceToConsume primitives.Gwei
|
||||
if earliestEpoch < earliestConsolidationEpoch {
|
||||
consolidationBalanceToConsume = perEpochConsolidationChurn
|
||||
} else {
|
||||
consolidationBalanceToConsume, err = s.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Consolidation doesn't fit in the current earliest epoch.
|
||||
if consolidationBalance > consolidationBalanceToConsume {
|
||||
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
|
||||
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
|
||||
additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
additionalEpochs++
|
||||
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
|
||||
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
|
||||
}
|
||||
|
||||
// Consume the balance and update state variables.
|
||||
if err := s.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := s.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return earliestConsolidationEpoch, nil
|
||||
}
|
||||
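A quick worked example of the arithmetic above may help, using the figures exercised by the tests that follow (32M ETH of active balance gives roughly 232 ETH of consolidation churn per epoch under mainnet parameters). This is a standalone sketch, not part of the change; the constants are lifted from the test fixtures, while the real code derives them from the beacon state.

package main

import "fmt"

// Standalone sketch of compute_consolidation_epoch_and_update_churn using the
// figures from churn_test.go: ~232 ETH of consolidation churn per epoch and a
// 235 ETH consolidation arriving at epoch 10.
func main() {
	const gweiPerETH = uint64(1_000_000_000)
	perEpochChurn := 232 * gweiPerETH // per-epoch consolidation churn for a 32M ETH state
	balanceToConsume := perEpochChurn // a fresh consolidation epoch starts with the full churn
	earliestEpoch := uint64(15)       // epoch 10 + 1 + MAX_SEED_LOOKAHEAD (4)
	consolidation := 235 * gweiPerETH // consolidation balance to schedule

	if consolidation > balanceToConsume {
		balanceToProcess := consolidation - balanceToConsume
		additionalEpochs := (balanceToProcess-1)/perEpochChurn + 1
		earliestEpoch += additionalEpochs
		balanceToConsume += additionalEpochs * perEpochChurn
	}
	balanceToConsume -= consolidation

	fmt.Println(earliestEpoch, balanceToConsume/gweiPerETH) // 16 229, matching the "flows into another epoch" case
}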
beacon-chain/core/electra/churn_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
|
||||
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
|
||||
vals := make([]*eth.Validator, num)
|
||||
for i := range vals {
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
}
|
||||
}
|
||||
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
|
||||
vals = append(vals, ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
|
||||
})
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
// Test setup: create a state with 32M ETH total active balance.
|
||||
// In this state, the churn is expected to be 232 ETH per epoch.
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
consolidationBalance primitives.Gwei
|
||||
expectedEpoch primitives.Epoch
|
||||
expectedConsolidationBalanceToConsume primitives.Gwei
|
||||
}{
|
||||
{
|
||||
name: "compute consolidation with no consolidation balance",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 0, // 0 ETH
|
||||
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
|
||||
expectedConsolidationBalanceToConsume: 232000000000, // 232 ETH
|
||||
},
|
||||
{
|
||||
name: "new epoch for consolidations",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 32000000000, // 32 ETH
|
||||
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
{
|
||||
name: "flows into another epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 235000000000, // 235 ETH
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 229000000000, // 229 ETH
|
||||
},
|
||||
{
|
||||
name: "not a new epoch, fits in remaining balance of current epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 15,
|
||||
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 32000000000, // 32 ETH
|
||||
expectedEpoch: 15, // Fits into current earliest consolidation epoch.
|
||||
expectedConsolidationBalanceToConsume: 168000000000, // 168 ETH
|
||||
},
|
||||
{
|
||||
name: "not a new epoch, does not fit in remaining balance of current epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 15,
|
||||
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 232000000000, // 232 ETH
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotEpoch, err := electra.ComputeConsolidationEpochAndUpdateChurn(context.TODO(), tt.state, tt.consolidationBalance)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedEpoch, gotEpoch)
|
||||
// Check consolidation balance to consume is set on the state.
|
||||
cbtc, err := tt.state.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedConsolidationBalanceToConsume, cbtc)
|
||||
// Check earliest consolidation epoch was set on the state.
|
||||
gotEpoch, err = tt.state.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedEpoch, gotEpoch)
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/electra/consolidations.go (new file, 258 lines)
@@ -0,0 +1,258 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
|
||||
// calls to the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_pending_consolidations(state: BeaconState) -> None:
|
||||
// next_pending_consolidation = 0
|
||||
// for pending_consolidation in state.pending_consolidations:
|
||||
// source_validator = state.validators[pending_consolidation.source_index]
|
||||
// if source_validator.slashed:
|
||||
// next_pending_consolidation += 1
|
||||
// continue
|
||||
// if source_validator.withdrawable_epoch > get_current_epoch(state):
|
||||
// break
|
||||
//
|
||||
// # Churn any target excess active balance of target and raise its max
|
||||
// switch_to_compounding_validator(state, pending_consolidation.target_index)
|
||||
// # Move active balance to target. Excess balance is withdrawable.
|
||||
// active_balance = get_active_balance(state, pending_consolidation.source_index)
|
||||
// decrease_balance(state, pending_consolidation.source_index, active_balance)
|
||||
// increase_balance(state, pending_consolidation.target_index, active_balance)
|
||||
// next_pending_consolidation += 1
|
||||
//
|
||||
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
|
||||
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
var nextPendingConsolidation uint64
|
||||
pendingConsolidations, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pc := range pendingConsolidations {
|
||||
sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sourceValidator.Slashed {
|
||||
nextPendingConsolidation++
|
||||
continue
|
||||
}
|
||||
if sourceValidator.WithdrawableEpoch > currentEpoch {
|
||||
break
|
||||
}
|
||||
|
||||
if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
nextPendingConsolidation++
|
||||
}
|
||||
|
||||
if nextPendingConsolidation > 0 {
|
||||
return st.SetPendingConsolidations(pendingConsolidations[nextPendingConsolidation:])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessConsolidations implements the spec definition below. This method makes mutating calls to
|
||||
// the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
|
||||
// # If the pending consolidations queue is full, no consolidations are allowed in the block
|
||||
// assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
|
||||
// # If there is too little available consolidation churn limit, no consolidations are allowed in the block
|
||||
// assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
|
||||
// consolidation = signed_consolidation.message
|
||||
// # Verify that source != target, so a consolidation cannot be used as an exit.
|
||||
// assert consolidation.source_index != consolidation.target_index
|
||||
//
|
||||
// source_validator = state.validators[consolidation.source_index]
|
||||
// target_validator = state.validators[consolidation.target_index]
|
||||
// # Verify the source and the target are active
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// assert is_active_validator(source_validator, current_epoch)
|
||||
// assert is_active_validator(target_validator, current_epoch)
|
||||
// # Verify exits for source and target have not been initiated
|
||||
// assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// # Consolidations must specify an epoch when they become valid; they are not valid before then
|
||||
// assert current_epoch >= consolidation.epoch
|
||||
//
|
||||
// # Verify the source and the target have Execution layer withdrawal credentials
|
||||
// assert has_execution_withdrawal_credential(source_validator)
|
||||
// assert has_execution_withdrawal_credential(target_validator)
|
||||
// # Verify the same withdrawal address
|
||||
// assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
|
||||
//
|
||||
// # Verify consolidation is signed by the source and the target
|
||||
// domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
|
||||
// signing_root = compute_signing_root(consolidation, domain)
|
||||
// pubkeys = [source_validator.pubkey, target_validator.pubkey]
|
||||
// assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
|
||||
//
|
||||
// # Initiate source validator exit and append pending consolidation
|
||||
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||
// state, source_validator.effective_balance)
|
||||
// source_validator.withdrawable_epoch = Epoch(
|
||||
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
// )
|
||||
// state.pending_consolidations.append(PendingConsolidation(
|
||||
// source_index=consolidation.source_index,
|
||||
// target_index=consolidation.target_index
|
||||
// ))
|
||||
func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethpb.SignedConsolidation) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessConsolidations")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil // Nothing to process.
|
||||
}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil, // Use genesis fork version
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helpers.ConsolidationChurnLimit(primitives.Gwei(totalBalance)) <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
return errors.New("too little available consolidation churn limit")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
for _, c := range cs {
|
||||
if c == nil || c.Message == nil {
|
||||
return errors.New("nil consolidation")
|
||||
}
|
||||
|
||||
if n, err := st.NumPendingConsolidations(); err != nil {
|
||||
return err
|
||||
} else if n >= params.BeaconConfig().PendingConsolidationsLimit {
|
||||
return errors.New("pending consolidations queue is full")
|
||||
}
|
||||
|
||||
if c.Message.SourceIndex == c.Message.TargetIndex {
|
||||
return errors.New("source and target index are the same")
|
||||
}
|
||||
source, err := st.ValidatorAtIndex(c.Message.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target, err := st.ValidatorAtIndex(c.Message.TargetIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(source, currentEpoch) {
|
||||
return errors.New("source is not active")
|
||||
}
|
||||
if !helpers.IsActiveValidator(target, currentEpoch) {
|
||||
return errors.New("target is not active")
|
||||
}
|
||||
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("source exit epoch has been initiated")
|
||||
}
|
||||
if target.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("target exit epoch has been initiated")
|
||||
}
|
||||
if currentEpoch < c.Message.Epoch {
|
||||
return errors.New("consolidation is not valid yet")
|
||||
}
|
||||
|
||||
if !helpers.HasExecutionWithdrawalCredentials(source) {
|
||||
return errors.New("source does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.HasExecutionWithdrawalCredentials(target) {
|
||||
return errors.New("target does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.IsSameWithdrawalCredentials(source, target) {
|
||||
return errors.New("source and target have different withdrawal credentials")
|
||||
}
|
||||
|
||||
sr, err := signing.ComputeSigningRoot(c.Message, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sourcePk, err := bls.PublicKeyFromBytes(source.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert source public key bytes to bls public key")
|
||||
}
|
||||
targetPk, err := bls.PublicKeyFromBytes(target.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert target public key bytes to bls public key")
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(c.Signature)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
if !sig.FastAggregateVerify([]bls.PublicKey{sourcePk, targetPk}, sr) {
|
||||
return errors.New("consolidation signature verification failed")
|
||||
}
|
||||
|
||||
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(source.EffectiveBalance))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
source.ExitEpoch = sEE
|
||||
source.WithdrawableEpoch = sEE + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
if err := st.UpdateValidatorAtIndex(c.Message.SourceIndex, source); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := st.AppendPendingConsolidation(c.Message.ToPendingConsolidation()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
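The churn guard near the top of ProcessConsolidations also explains the 19_000_000 ETH fixture used by the tests that follow: under mainnet parameters that is roughly the smallest round total whose consolidation churn limit clears the 32 ETH MinActivationBalance. A rough, standalone sketch of that arithmetic, assuming mainnet constants (balance churn = max(128 ETH, total/65536), activation/exit churn capped at 256 ETH); the real values come from helpers.ConsolidationChurnLimit.

package main

import "fmt"

// Back-of-the-envelope check of the "too little available consolidation churn
// limit" guard, working in whole ETH for readability.
func main() {
	for _, totalETH := range []uint64{18_000_000, 19_000_000} {
		balanceChurn := max(uint64(128), totalETH/65536)         // floored to whole ETH by integer division
		activationExitChurn := min(uint64(256), balanceChurn)    // capped at 256 ETH
		consolidationChurn := balanceChurn - activationExitChurn // what is left for consolidations
		fmt.Println(totalETH, consolidationChurn, consolidationChurn > 32) // 18M -> 18 false, 19M -> 33 true
	}
}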
beacon-chain/core/electra/consolidations_test.go (new file, 441 lines)
@@ -0,0 +1,441 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/interop"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingConsolidations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
check func(*testing.T, state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
state: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no pending consolidations",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "processes pending consolidation successfully",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// Balances are transferred from v0 to v1.
|
||||
bal0, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), bal0)
|
||||
bal1, err := st.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2*params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// The pending consolidation is removed from the list.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), num)
|
||||
|
||||
// v1 is switched to compounding validator.
|
||||
v1, err := st.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "stop processing when a source val withdrawable epoch is in the future",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
WithdrawableEpoch: 100,
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// No balances are transferred from v0 to v1.
|
||||
bal0, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
|
||||
bal1, err := st.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// The pending consolidation is still in the list.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), num)
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "slashed validator is not consolidated",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
{
|
||||
Slashed: true,
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xCC},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 2,
|
||||
TargetIndex: 3,
|
||||
},
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// No balances are transferred from v2 to v3.
|
||||
bal0, err := st.BalanceAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
|
||||
bal1, err := st.BalanceAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// No pending consolidation remaining.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), num)
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessPendingConsolidations(context.TODO(), tt.state)
|
||||
require.Equal(t, tt.wantErr, err != nil)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
gwei := balETH * 1_000_000_000
|
||||
balPerVal := params.BeaconConfig().MinActivationBalance
|
||||
numVals := gwei / balPerVal
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: balPerVal,
|
||||
WithdrawalCredentials: wc,
|
||||
}
|
||||
bals[i] = balPerVal
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Fork: ð.Fork{
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func TestProcessConsolidations(t *testing.T) {
|
||||
secretKeys, publicKeys, err := interop.DeterministicallyGenerateKeys(0, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisValidatorRoot := bytesutil.PadTo([]byte("genesisValidatorRoot"), fieldparams.RootLength)
|
||||
|
||||
_ = secretKeys
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
scs []*eth.SignedConsolidation
|
||||
check func(*testing.T, state.BeaconState)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
scs: make([]*eth.SignedConsolidation, 10),
|
||||
wantErr: "nil state",
|
||||
},
|
||||
{
|
||||
name: "nil consolidation in slice",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{nil, nil},
|
||||
wantErr: "nil consolidation",
|
||||
},
|
||||
{
|
||||
name: "state is 100% full of pending consolidations",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
pc := make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit)
|
||||
require.NoError(t, st.SetPendingConsolidations(pc))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "pending consolidations queue is full",
|
||||
},
|
||||
{
|
||||
name: "state has too little consolidation churn limit available to process a consolidation",
|
||||
state: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "too little available consolidation churn limit",
|
||||
},
|
||||
{
|
||||
name: "consolidation with source and target as the same index is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 100}}},
|
||||
wantErr: "source and target index are the same",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with future epoch is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 55}}},
|
||||
wantErr: "consolidation is not valid yet",
|
||||
},
|
||||
{
|
||||
name: "source validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "target validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "source and target with different withdrawal credentials is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "source and target have different withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "consolidation with valid signatures is OK",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
require.NoError(t, st.SetGenesisValidatorsRoot(genesisValidatorRoot))
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
target, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
source.PublicKey = publicKeys[0].Marshal()
|
||||
source.WithdrawalCredentials = target.WithdrawalCredentials
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(100, source))
|
||||
target.PublicKey = publicKeys[1].Marshal()
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, target))
|
||||
return st
|
||||
}(),
|
||||
scs: func() []*eth.SignedConsolidation {
|
||||
sc := ð.SignedConsolidation{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 8}}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil,
|
||||
genesisValidatorRoot,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(sc.Message, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
sig0 := secretKeys[0].Sign(sr[:])
|
||||
sig1 := secretKeys[1].Sign(sr[:])
|
||||
|
||||
sc.Signature = blst.AggregateSignatures([]common.Signature{sig0, sig1}).Marshal()
|
||||
|
||||
return []*eth.SignedConsolidation{sc}
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
// The consolidated validator is exiting.
|
||||
require.Equal(t, primitives.Epoch(15), source.ExitEpoch) // 15 = state.Epoch(10) + MAX_SEED_LOOKAHEAD(4) + 1
|
||||
require.Equal(t, primitives.Epoch(15+params.BeaconConfig().MinValidatorWithdrawabilityDelay), source.WithdrawableEpoch)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidations(context.TODO(), tt.state, tt.scs)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/electra/deposits.go (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_pending_balance_deposits(state: BeaconState) -> None:
|
||||
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
// processed_amount = 0
|
||||
// next_deposit_index = 0
|
||||
//
|
||||
// for deposit in state.pending_balance_deposits:
|
||||
// if processed_amount + deposit.amount > available_for_processing:
|
||||
// break
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// processed_amount += deposit.amount
|
||||
// next_deposit_index += 1
|
||||
//
|
||||
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||
//
|
||||
// if len(state.pending_balance_deposits) == 0:
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
depBalToConsume, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
|
||||
nextDepositIndex := 0
|
||||
|
||||
deposits, err := st.PendingBalanceDeposits()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, deposit := range deposits {
|
||||
if primitives.Gwei(deposit.Amount) > availableForProcessing {
|
||||
break
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
availableForProcessing -= primitives.Gwei(deposit.Amount)
|
||||
nextDepositIndex++
|
||||
}
|
||||
|
||||
deposits = deposits[nextDepositIndex:]
|
||||
if err := st.SetPendingBalanceDeposits(deposits); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(deposits) == 0 {
|
||||
return st.SetDepositBalanceToConsume(0)
|
||||
} else {
|
||||
return st.SetDepositBalanceToConsume(availableForProcessing)
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessDepositRequests processes execution layer deposit requests as part of electra.
|
||||
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
|
||||
defer span.End()
|
||||
// TODO: replace with 6110 logic
|
||||
// return b.ProcessDepositRequests(beaconState, requests)
|
||||
return beaconState, nil
|
||||
}
|
||||
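Note that the loop above tracks the remaining allowance directly (decrementing availableForProcessing) rather than accumulating a processed_amount counter as in the pseudocode; the two are equivalent. Below is a standalone sketch of the behaviour exercised by the first deposit-processing test that follows, assuming a 128 ETH activation/exit churn limit (the mainnet minimum, which is what a 1,000 ETH state yields); it is an illustration only, not part of the change.

package main

import "fmt"

// Sketch of the pending balance deposit loop: 100 Gwei carried over in
// deposit_balance_to_consume, a 128 ETH churn limit, and twenty pending
// deposits of churn/10 each. Only the first ten fit under the allowance.
func main() {
	const gwei = uint64(1)
	const eth = 1_000_000_000 * gwei

	churn := 128 * eth
	available := 100*gwei + churn // deposit_balance_to_consume + churn limit
	depositAmount := churn / 10

	processed := 0
	for i := 0; i < 20; i++ {
		if depositAmount > available {
			break // this deposit no longer fits; stop and carry the remainder over
		}
		available -= depositAmount
		processed++
	}
	fmt.Println(processed, available) // 10 100: ten deposits applied, 100 Gwei left to consume
}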
beacon-chain/core/electra/deposits_test.go (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
wantErr bool
|
||||
check func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "nil state fails",
|
||||
state: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no deposits resets balance to consume",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more deposits than balance to consume processes partial deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 20)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 10,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
|
||||
}
|
||||
|
||||
// Half of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "less deposits than balance to consume processes all deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 5)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 5,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 0..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 5; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
}
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var tab uint64
|
||||
var err error
|
||||
if tt.state != nil {
|
||||
// The caller of this method would normally have the precompute balance values for total
|
||||
// active balance for this epoch. For ease of test setup, we will compute total active
|
||||
// balance from the given state.
|
||||
tab, err = helpers.TotalActiveBalance(tt.state)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
|
||||
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/electra/effective_balance_updates.go (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_effective_balance_updates(state: BeaconState) -> None:
|
||||
// # Update effective balances with hysteresis
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// balance = state.balances[index]
|
||||
// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
|
||||
// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
|
||||
// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
|
||||
// EFFECTIVE_BALANCE_LIMIT = (
|
||||
// MAX_EFFECTIVE_BALANCE_EIP7251 if has_compounding_withdrawal_credential(validator)
|
||||
// else MIN_ACTIVATION_BALANCE
|
||||
// )
|
||||
//
|
||||
// if (
|
||||
// balance + DOWNWARD_THRESHOLD < validator.effective_balance
|
||||
// or validator.effective_balance + UPWARD_THRESHOLD < balance
|
||||
// ):
|
||||
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
|
||||
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
|
||||
bals := state.Balances()
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
|
||||
if val == nil {
|
||||
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
|
||||
}
|
||||
balance := bals[idx]
|
||||
|
||||
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
|
||||
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
|
||||
val.EffectiveBalance = effectiveBal
|
||||
return false, val, nil
|
||||
}
|
||||
return false, val, nil
|
||||
}
|
||||
|
||||
return state.ApplyToEveryValidator(validatorFunc)
|
||||
}
|
||||
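For orientation, under mainnet values (EFFECTIVE_BALANCE_INCREMENT 1 ETH, HYSTERESIS_QUOTIENT 4, downward multiplier 1, upward multiplier 5) the hysteresis window is 0.25 ETH below and 1.25 ETH above the current effective balance. A small standalone sketch of those bounds, not part of the change:

package main

import "fmt"

// Worked example of the hysteresis window: a 32 ETH effective balance only
// moves once the actual balance leaves the (31.75 ETH, 33.25 ETH) band.
func main() {
	const increment = uint64(1_000_000_000) // 1 ETH in Gwei
	hysteresisInc := increment / 4
	downwardThreshold := hysteresisInc * 1 // 0.25 ETH
	upwardThreshold := hysteresisInc * 5   // 1.25 ETH

	effective := 32 * increment
	for _, balance := range []uint64{31_800_000_000, 31_700_000_000, 33_300_000_000} {
		update := balance+downwardThreshold < effective || effective+upwardThreshold < balance
		fmt.Println(balance, update) // 31.8 ETH: false, 31.7 ETH: true, 33.3 ETH: true
	}
}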
beacon-chain/core/electra/effective_balance_updates_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessEffectiveBalanceUpdates(t *testing.T) {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
wantErr bool
|
||||
check func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "validator with compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator without compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
|
||||
WithdrawalCredentials: nil,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator effective balance moves only when outside of threshold",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold - 1, // beyond downward threshold
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold + 1, // within downward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold + 1, // beyond upward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold - 1, // within upward threshold
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
// validator 0 has a balance diff exceeding the threshold so a diff should be applied to
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance-params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// validator 1 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
|
||||
// Validator 2 has a balance diff exceeding the threshold so a diff should be applied to the
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err = bs.ValidatorAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// Validator 3 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessEffectiveBalanceUpdates(tt.state)
|
||||
require.Equal(t, tt.wantErr, err != nil, "unexpected error returned wanted error=nil (%s), got error=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/electra/registry_updates.go (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
)
|
||||
|
||||
// ProcessRegistryUpdates rotates validators in and out of active pool.
|
||||
// the amount to rotate is determined churn limit.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_registry_updates(state: BeaconState) -> None:
|
||||
// # Process activation eligibility and ejections
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if is_eligible_for_activation_queue(validator):
|
||||
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
|
||||
//
|
||||
// if (
|
||||
// is_active_validator(validator, get_current_epoch(state))
|
||||
// and validator.effective_balance <= EJECTION_BALANCE
|
||||
// ):
|
||||
// initiate_validator_exit(state, ValidatorIndex(index))
|
||||
//
|
||||
// # Activate all eligible validators
|
||||
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
// for validator in state.validators:
|
||||
// if is_eligible_for_activation(state, validator):
|
||||
// validator.activation_epoch = activation_epoch
|
||||
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
// TODO: replace with real implementation
|
||||
return state, nil
|
||||
}
|
||||
129
beacon-chain/core/electra/transition.go
Normal file
129
beacon-chain/core/electra/transition.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Re-exports for methods that haven't changed in Electra.
|
||||
var (
|
||||
InitializePrecomputeValidators = altair.InitializePrecomputeValidators
|
||||
ProcessEpochParticipation = altair.ProcessEpochParticipation
|
||||
ProcessInactivityScores = altair.ProcessInactivityScores
|
||||
ProcessRewardsAndPenaltiesPrecompute = altair.ProcessRewardsAndPenaltiesPrecompute
|
||||
ProcessSlashings = e.ProcessSlashings
|
||||
ProcessEth1DataReset = e.ProcessEth1DataReset
|
||||
ProcessSlashingsReset = e.ProcessSlashingsReset
|
||||
ProcessRandaoMixesReset = e.ProcessRandaoMixesReset
|
||||
ProcessHistoricalDataUpdate = e.ProcessHistoricalDataUpdate
|
||||
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
|
||||
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
|
||||
AttestationsDelta = altair.AttestationsDelta
|
||||
ProcessSyncAggregate = altair.ProcessSyncAggregate
|
||||
)
|
||||
|
||||
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
|
||||
// It's optimized by pre computing validator attested info and epoch total/attested balances upfront.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_epoch(state: BeaconState) -> None:
|
||||
// process_justification_and_finalization(state)
|
||||
// process_inactivity_updates(state)
|
||||
// process_rewards_and_penalties(state)
|
||||
// process_registry_updates(state)
|
||||
// process_slashings(state)
|
||||
// process_eth1_data_reset(state)
|
||||
// process_pending_balance_deposits(state) # New in EIP7251
|
||||
// process_pending_consolidations(state) # New in EIP7251
|
||||
// process_effective_balance_updates(state)
|
||||
// process_slashings_reset(state)
|
||||
// process_randao_mixes_reset(state)
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
vp, bp, err := InitializePrecomputeValidators(ctx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process justification")
|
||||
}
|
||||
state, vp, err = ProcessInactivityScores(ctx, state, vp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process inactivity updates")
|
||||
}
|
||||
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
state, err = ProcessRegistryUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = ProcessPendingConsolidations(ctx, state); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = ProcessEffectiveBalanceUpdates(state); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessRandaoMixesReset(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessHistoricalDataUpdate(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
311
beacon-chain/core/electra/upgrade.go
Normal file
311
beacon-chain/core/electra/upgrade.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// UpgradeToElectra updates inputs a generic state to return the version Electra state.
|
||||
// def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
|
||||
//
|
||||
// epoch = deneb.get_current_epoch(pre)
|
||||
// latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
// parent_hash=pre.latest_execution_payload_header.parent_hash,
|
||||
// fee_recipient=pre.latest_execution_payload_header.fee_recipient,
|
||||
// state_root=pre.latest_execution_payload_header.state_root,
|
||||
// receipts_root=pre.latest_execution_payload_header.receipts_root,
|
||||
// logs_bloom=pre.latest_execution_payload_header.logs_bloom,
|
||||
// prev_randao=pre.latest_execution_payload_header.prev_randao,
|
||||
// block_number=pre.latest_execution_payload_header.block_number,
|
||||
// gas_limit=pre.latest_execution_payload_header.gas_limit,
|
||||
// gas_used=pre.latest_execution_payload_header.gas_used,
|
||||
// timestamp=pre.latest_execution_payload_header.timestamp,
|
||||
// extra_data=pre.latest_execution_payload_header.extra_data,
|
||||
// base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
// block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
// transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
// withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
// blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
|
||||
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
|
||||
// deposit_requests_root=Root(), # [New in Electra:EIP6110]
|
||||
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
|
||||
// )
|
||||
//
|
||||
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
|
||||
// if not exit_epochs:
|
||||
// exit_epochs = [get_current_epoch(pre)]
|
||||
// earliest_exit_epoch = max(exit_epochs) + 1
|
||||
//
|
||||
// post = BeaconState(
|
||||
// # Versioning
|
||||
// genesis_time=pre.genesis_time,
|
||||
// genesis_validators_root=pre.genesis_validators_root,
|
||||
// slot=pre.slot,
|
||||
// fork=Fork(
|
||||
// previous_version=pre.fork.current_version,
|
||||
// current_version=ELECTRA_FORK_VERSION, # [Modified in Electra:EIP6110]
|
||||
// epoch=epoch,
|
||||
// ),
|
||||
// # History
|
||||
// latest_block_header=pre.latest_block_header,
|
||||
// block_roots=pre.block_roots,
|
||||
// state_roots=pre.state_roots,
|
||||
// historical_roots=pre.historical_roots,
|
||||
// # Eth1
|
||||
// eth1_data=pre.eth1_data,
|
||||
// eth1_data_votes=pre.eth1_data_votes,
|
||||
// eth1_deposit_index=pre.eth1_deposit_index,
|
||||
// # Registry
|
||||
// validators=pre.validators,
|
||||
// balances=pre.balances,
|
||||
// # Randomness
|
||||
// randao_mixes=pre.randao_mixes,
|
||||
// # Slashings
|
||||
// slashings=pre.slashings,
|
||||
// # Participation
|
||||
// previous_epoch_participation=pre.previous_epoch_participation,
|
||||
// current_epoch_participation=pre.current_epoch_participation,
|
||||
// # Finality
|
||||
// justification_bits=pre.justification_bits,
|
||||
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||
// current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||
// finalized_checkpoint=pre.finalized_checkpoint,
|
||||
// # Inactivity
|
||||
// inactivity_scores=pre.inactivity_scores,
|
||||
// # Sync
|
||||
// current_sync_committee=pre.current_sync_committee,
|
||||
// next_sync_committee=pre.next_sync_committee,
|
||||
// # Execution-layer
|
||||
// latest_execution_payload_header=latest_execution_payload_header, # [Modified in Electra:EIP6110:EIP7002]
|
||||
// # Withdrawals
|
||||
// next_withdrawal_index=pre.next_withdrawal_index,
|
||||
// next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
|
||||
// # Deep history valid from Capella onwards
|
||||
// historical_summaries=pre.historical_summaries,
|
||||
// # [New in Electra:EIP6110]
|
||||
// deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
|
||||
// # [New in Electra:EIP7251]
|
||||
// deposit_balance_to_consume=0,
|
||||
// exit_balance_to_consume=0,
|
||||
// earliest_exit_epoch=earliest_exit_epoch,
|
||||
// consolidation_balance_to_consume=0,
|
||||
// earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
|
||||
// pending_balance_deposits=[],
|
||||
// pending_partial_withdrawals=[],
|
||||
// pending_consolidations=[],
|
||||
// )
|
||||
//
|
||||
// post.exit_balance_to_consume = get_activation_exit_churn_limit(post)
|
||||
// post.consolidation_balance_to_consume = get_consolidation_churn_limit(post)
|
||||
//
|
||||
// # [New in Electra:EIP7251]
|
||||
// # add validators that are not yet active to pending balance deposits
|
||||
// pre_activation = sorted([
|
||||
// index for index, validator in enumerate(post.validators)
|
||||
// if validator.activation_epoch == FAR_FUTURE_EPOCH
|
||||
// ], key=lambda index: (
|
||||
// post.validators[index].activation_eligibility_epoch,
|
||||
// index
|
||||
// ))
|
||||
//
|
||||
// for index in pre_activation:
|
||||
// queue_entire_balance_and_reset_validator(post, ValidatorIndex(index))
|
||||
//
|
||||
// # Ensure early adopters of compounding credentials go through the activation churn
|
||||
// for index, validator in enumerate(post.validators):
|
||||
// if has_compounding_withdrawal_credential(validator):
|
||||
// queue_excess_active_balance(post, ValidatorIndex(index))
|
||||
//
|
||||
// return post
|
||||
func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSyncCommittee, err := beaconState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inactivityScores, err := beaconState.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txRoot, err := payloadHeader.TransactionsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wdRoot, err := payloadHeader.WithdrawalsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wi, err := beaconState.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vi, err := beaconState.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
summaries, err := beaconState.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
historicalRoots, err := beaconState.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
excessBlobGas, err := payloadHeader.ExcessBlobGas()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobGasUsed, err := payloadHeader.BlobGasUsed()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// [New in Electra:EIP7251]
|
||||
earliestExitEpoch := time.CurrentEpoch(beaconState)
|
||||
preActivationIndices := make([]primitives.ValidatorIndex, 0)
|
||||
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
|
||||
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
|
||||
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
|
||||
earliestExitEpoch = val.ExitEpoch()
|
||||
}
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
earliestExitEpoch++ // Increment to find the earliest possible exit epoch
|
||||
|
||||
// note: should be the same in prestate and post beaconState.
|
||||
// we are deviating from the specs a bit as it calls for using the post beaconState
|
||||
tab, err := helpers.TotalActiveBalance(beaconState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get total active balance")
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateElectra{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: beaconState.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
Epoch: time.CurrentEpoch(beaconState),
|
||||
},
|
||||
LatestBlockHeader: beaconState.LatestBlockHeader(),
|
||||
BlockRoots: beaconState.BlockRoots(),
|
||||
StateRoots: beaconState.StateRoots(),
|
||||
HistoricalRoots: historicalRoots,
|
||||
Eth1Data: beaconState.Eth1Data(),
|
||||
Eth1DataVotes: beaconState.Eth1DataVotes(),
|
||||
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
|
||||
Validators: beaconState.Validators(),
|
||||
Balances: beaconState.Balances(),
|
||||
RandaoMixes: beaconState.RandaoMixes(),
|
||||
Slashings: beaconState.Slashings(),
|
||||
PreviousEpochParticipation: prevEpochParticipation,
|
||||
CurrentEpochParticipation: currentEpochParticipation,
|
||||
JustificationBits: beaconState.JustificationBits(),
|
||||
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
DepositBalanceToConsume: 0,
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
|
||||
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
|
||||
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
|
||||
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
|
||||
}
|
||||
|
||||
// Sorting preActivationIndices based on a custom criteria
|
||||
sort.Slice(preActivationIndices, func(i, j int) bool {
|
||||
// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
|
||||
if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
|
||||
return preActivationIndices[i] < preActivationIndices[j]
|
||||
}
|
||||
return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
|
||||
})
|
||||
|
||||
// need to cast the beaconState to use in helper functions
|
||||
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
|
||||
}
|
||||
|
||||
for _, index := range preActivationIndices {
|
||||
if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure early adopters of compounding credentials go through the activation churn
|
||||
for _, index := range compoundWithdrawalIndices {
|
||||
if err := QueueExcessActiveBalance(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue excess active balance")
|
||||
}
|
||||
}
|
||||
|
||||
return post, nil
|
||||
}
|
||||
188
beacon-chain/core/electra/upgrade_test.go
Normal file
188
beacon-chain/core/electra/upgrade_test.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestUpgradeToElectra(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
|
||||
vals := st.Validators()
|
||||
vals[0].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
vals[1].WithdrawalCredentials = []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[1] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
preForkState := st.Copy()
|
||||
mSt, err := electra.UpgradeToElectra(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Validators()[2:], mSt.Validators()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Balances()[2:], mSt.Balances()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
|
||||
require.Equal(t, len(preForkState.Validators()), len(mSt.Validators()))
|
||||
|
||||
preVal, err := preForkState.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal.EffectiveBalance)
|
||||
|
||||
preVal2, err := preForkState.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal2.EffectiveBalance)
|
||||
|
||||
mVal, err := mSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), mVal.EffectiveBalance)
|
||||
|
||||
mVal2, err := mSt.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, mVal2.EffectiveBalance)
|
||||
|
||||
numValidators := mSt.NumValidators()
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
hr1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
hr2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hr1, hr2)
|
||||
|
||||
f := mSt.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
PreviousVersion: st.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
Epoch: time.CurrentEpoch(st),
|
||||
}, f)
|
||||
csc, err := mSt.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err := preForkState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, csc)
|
||||
nsc, err := mSt.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err = preForkState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, nsc)
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
|
||||
require.Equal(t, true, ok)
|
||||
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
txRoot, err := prevHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
|
||||
nwi, err := mSt.NextWithdrawalIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), nwi)
|
||||
|
||||
lwvi, err := mSt.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), lwvi)
|
||||
|
||||
summaries, err := mSt.HistoricalSummaries()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
|
||||
startIndex, err := mSt.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().UnsetDepositRequestsStartIndex, startIndex)
|
||||
|
||||
balance, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), balance)
|
||||
|
||||
tab, err := helpers.TotalActiveBalance(mSt)
|
||||
require.NoError(t, err)
|
||||
|
||||
ebtc, err := mSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitChurnLimit(primitives.Gwei(tab)), ebtc)
|
||||
|
||||
eee, err := mSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(1), eee)
|
||||
|
||||
cbtc, err := mSt.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ConsolidationChurnLimit(primitives.Gwei(tab)), cbtc)
|
||||
|
||||
earliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)
|
||||
|
||||
pendingBalanceDeposits, err := mSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pendingBalanceDeposits))
|
||||
require.Equal(t, uint64(1000), pendingBalanceDeposits[1].Amount)
|
||||
|
||||
numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), numPendingPartialWithdrawals)
|
||||
|
||||
consolidations, err := mSt.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(consolidations))
|
||||
|
||||
}
|
||||
104
beacon-chain/core/electra/validator.go
Normal file
104
beacon-chain/core/electra/validator.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// SwitchToCompoundingValidator
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// validator = state.validators[index]
|
||||
// if has_eth1_withdrawal_credential(validator):
|
||||
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
|
||||
// queue_excess_active_balance(state, index)
|
||||
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(v.WithdrawalCredentials) == 0 {
|
||||
return errors.New("validator has no withdrawal credentials")
|
||||
}
|
||||
if helpers.HasETH1WithdrawalCredential(v) {
|
||||
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return QueueExcessActiveBalance(s, idx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueueExcessActiveBalance
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// if balance > MIN_ACTIVATION_BALANCE:
|
||||
// excess_balance = balance - MIN_ACTIVATION_BALANCE
|
||||
// state.balances[index] = MIN_ACTIVATION_BALANCE
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=excess_balance)
|
||||
// )
|
||||
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bal > params.BeaconConfig().MinActivationBalance {
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.AppendPendingBalanceDeposit(idx, excessBalance)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// state.balances[index] = 0
|
||||
// validator = state.validators[index]
|
||||
// validator.effective_balance = 0
|
||||
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=balance)
|
||||
// )
|
||||
//
|
||||
//nolint:dupword
|
||||
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.EffectiveBalance = 0
|
||||
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.AppendPendingBalanceDeposit(idx, bal)
|
||||
}
|
||||
144
beacon-chain/core/electra/validator_test.go
Normal file
144
beacon-chain/core/electra/validator_test.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{}, // No withdrawal credentials
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
|
||||
},
|
||||
})
|
||||
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
|
||||
|
||||
// Test that a validator with withdrawal credentials can be switched to compounding.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
v, err := s.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
|
||||
// val_1 Balance is not changed
|
||||
b, err := s.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
|
||||
pbd, err := s.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
|
||||
|
||||
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
|
||||
b, err = s.BalanceAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
|
||||
pbd, err = s.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
|
||||
require.Equal(t, uint64(100_000), pbd[0].Amount, "pending balance deposit amount is incorrect")
|
||||
require.Equal(t, primitives.ValidatorIndex(2), pbd[0].Index, "pending balance deposit index is incorrect")
|
||||
}
|
||||
|
||||
func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance + 100_000,
|
||||
ActivationEligibilityEpoch: primitives.Epoch(100),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance + 100_000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(s, 0))
|
||||
b, err := s.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), b, "balance was not changed")
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), v.EffectiveBalance, "effective balance was not reset")
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, v.ActivationEligibilityEpoch, "activation eligibility epoch was not reset")
|
||||
pbd, err := s.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
|
||||
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
|
||||
}
|
||||
|
||||
func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
vals := st.Validators()
|
||||
vals[0].WithdrawalCredentials = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 1010
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
bytes.HasPrefix(val.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
err := electra.QueueExcessActiveBalance(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end
|
||||
|
||||
bals = st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
|
||||
}
|
||||
|
||||
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
// need to manually set this to 0 as after 6110 these balances are now 0 and instead populates pending balance deposits
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance - 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
err := electra.QueueEntireBalanceAndResetValidator(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)
|
||||
bal, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), bal)
|
||||
}
|
||||
194
beacon-chain/core/electra/withdrawals.go
Normal file
194
beacon-chain/core/electra/withdrawals.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload
|
||||
// into the beacon state triggered by the execution layer.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_withdrawal_request(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// withdrawal_request: WithdrawalRequest
|
||||
//
|
||||
// ) -> None:
|
||||
// amount = execution_layer_withdrawal_request.amount
|
||||
// is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
|
||||
//
|
||||
// # If partial withdrawal queue is full, only full exits are processed
|
||||
// if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
|
||||
// return
|
||||
//
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// # Verify pubkey exists
|
||||
// request_pubkey = execution_layer_withdrawal_request.validator_pubkey
|
||||
// if request_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
|
||||
// validator = state.validators[index]
|
||||
//
|
||||
// # Verify withdrawal credentials
|
||||
// has_correct_credential = has_execution_withdrawal_credential(validator)
|
||||
// is_correct_source_address = (
|
||||
// validator.withdrawal_credentials[12:] == execution_layer_withdrawal_request.source_address
|
||||
// )
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
// # Verify the validator is active
|
||||
// if not is_active_validator(validator, get_current_epoch(state)):
|
||||
// return
|
||||
// # Verify exit has not been initiated
|
||||
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
// # Verify the validator has been active long enough
|
||||
// if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
|
||||
// return
|
||||
//
|
||||
// pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index)
|
||||
//
|
||||
// if is_full_exit_request:
|
||||
// # Only exit validator if it has no pending withdrawals in the queue
|
||||
// if pending_balance_to_withdraw == 0:
|
||||
// initiate_validator_exit(state, index)
|
||||
// return
|
||||
//
|
||||
// has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
|
||||
// has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
|
||||
//
|
||||
// # Only allow partial withdrawals with compounding withdrawal credentials
|
||||
// if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
// amount
|
||||
// )
|
||||
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
|
||||
// withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
|
||||
// state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
|
||||
// index=index,
|
||||
// amount=to_withdraw,
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
|
||||
defer span.End()
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
for _, wr := range wrs {
|
||||
if wr == nil {
|
||||
return nil, errors.New("nil execution layer withdrawal request")
|
||||
}
|
||||
amount := wr.Amount
|
||||
isFullExitRequest := amount == params.BeaconConfig().FullExitRequestAmount
|
||||
// If partial withdrawal queue is full, only full exits are processed
|
||||
if n, err := st.NumPendingPartialWithdrawals(); err != nil {
|
||||
return nil, err
|
||||
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
|
||||
// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
|
||||
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
continue
|
||||
}
|
||||
|
||||
vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
|
||||
if !exists {
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
isCorrectSourceAddress := bytes.Equal(validator.WithdrawalCredentials[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidator(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch.AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
|
||||
pendingBalanceToWithdraw, err := st.PendingBalanceToWithdraw(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isFullExitRequest {
|
||||
// Only exit validator if it has no pending withdrawals in the queue
|
||||
if pendingBalanceToWithdraw == 0 {
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
|
||||
var err error
|
||||
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance >= params.BeaconConfig().MinActivationBalance
|
||||
vBal, err := st.BalanceAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasExcessBalance := vBal > params.BeaconConfig().MinActivationBalance+pendingBalanceToWithdraw
|
||||
|
||||
// Only allow partial withdrawals with compounding withdrawal credentials
|
||||
if helpers.HasCompoundingWithdrawalCredential(validator) && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
// Spec definition:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
// amount
|
||||
// )
|
||||
|
||||
// note: you can safely subtract these values because haxExcessBalance is checked
|
||||
toWithdraw := min(vBal-params.BeaconConfig().MinActivationBalance-pendingBalanceToWithdraw, amount)
|
||||
exitQueueEpoch, err := st.ExitEpochAndUpdateChurn(primitives.Gwei(toWithdraw))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// safe add the uint64 to avoid overflow
|
||||
withdrawableEpoch, err := exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add withdrawability delay to exit queue epoch")
|
||||
}
|
||||
if err := st.AppendPendingPartialWithdrawal(ðpb.PendingPartialWithdrawal{
|
||||
Index: vIdx,
|
||||
Amount: toWithdraw,
|
||||
WithdrawableEpoch: withdrawableEpoch,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user