Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 22:07:59 -05:00

Compare commits: electra-up...disablingD (162 commits)
Commits (SHA1):
c979fb57cc, f4f596aa05, bc3ff69448, 8055b5e639, 3fb1fa47d1, 158f4aec9d, addb69b804, cf029aaadf, ca92a955b8,
7945cb8477, bb803a2cf4, 42c6bbc4ec, 1f91169554, bc1e0d1dd5, ec7bbc856b, 00daa4ad23, e05217bcef, 036c10a12f,
4329c511ab, e6cb7d032f, 625cb88d78, bc428fe3d7, 2bbab6d945, ecd37bf584, 6be17b151b, 769f4bd69f, a713d69f6e,
e92b0e9cc2, 81bad0ec82, 889396c44c, c2d2d8d727, 89871ec39d, 5d85e47968, ded08c87e5, 69a2a7e71d, 6653f835b1,
364c95362b, 85f9820e31, ab0cd8e0b6, 2e82b31d2e, a42d48265d, da9a60427a, b2b4efe84d, 5083b23483, 74e2fb43e3,
ab5da74e73, aaf69101e1, fa1b02926b, dc88dcea92, 5784e94dc0, 3d2cf28624, e2e68f57e0, 581f8f505e, 61208142ff,
a910c370e2, b4536d4494, e176dfa613, b049018c0f, fe8c86e0db, 1eb0e50746, d8bcce7689, ff15d86ff3, 068139a78a,
8dd7361b6a, 41ea1d230a, 9e25026519, 9e9559df60, 7a5a6c7e54, be317c439d, f43383a3fb, 22f6f787e1, 44b3986025,
6cb845660a, de2c866707, 74ddb84e0a, 0c6a068fd5, fad92472d8, 2a44e8e6ec, e3d27f29c7, e011f05403, b8cd77945d,
9a7f521f8a, 102f94f914, 0c0a497651, e0785a8939, af098e737e, 1e4ede5585, fb2620364a, 68b38b6666, ff3e0856a1,
85f334b663, 10f520accb, 836608537e, 13e09c58f6, 600ca08aa8, 0ed74b3c4a, 7c69a9aa1c, c50cfb044a, 38d4e179ba,
be80728320, 09028033c0, 52c036c3ab, 2fc7cdeba7, 6f7976766d, 5c369361b0, 7d48b45152, 345aabe996, 5d04b36680,
cd8907f76c, b108d5bf54, 4d823acf45, aa868e5e8c, b1be6cd20b, fd9321f6ba, 8364226b68, 49055acf81, 57ffc12f17,
d066480a51, 0e8f98b2a4, 3a734f51e0, c7e2d709cf, 8b4b3a269b, 2f76ba542f, 637cbc88e8, fadff022a0, 05784a6c28,
5a48e002dd, d6f86269a4, 422438f515, e5b25071f9, 498ee635e1, c238c00630, 3eacc37831, 5267b4b4d4, ec84a1b49c,
afb7383225, a00b40fa81, 365c6252ba, 7c81c7da90, f8950c8c40, 18be899aef, 9dc3b645c4, 318561999d, ae5b0b4391,
74b5f6ecf2, aea2a469cc, 7a394062e1, 8070fc8ece, d6aeaf77b3, 40434ac209, 0aab919d7c, a8ecf5d118, 8c0d6b27d0,
2e6f1de29a, 657750b803, af5eb82217, 84e7f33fd9, ca83d29eef, 0d49f6c142, 041e81aad2, 028504ae9a, 91c55c6880
.bazelrc (1 change)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
.github/actions/gomodtidy/Dockerfile (2 changes, vendored)
@@ -1,4 +1,4 @@
FROM golang:1.21-alpine
FROM golang:1.22-alpine
COPY entrypoint.sh /entrypoint.sh
.github/workflows/fuzz.yml (4 changes, vendored)
@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21.5'
go-version: '1.22.3'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21.5'
go-version: '1.22.3'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
.github/workflows/go.yml (14 changes, vendored)
@@ -28,14 +28,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v3
with:
go-version: '1.21.5'
go-version: '1.22.4'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.19.0
gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -45,16 +45,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v3
with:
go-version: '1.21.5'
go-version: '1.22.4'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.55.2
version: v1.56.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -64,7 +64,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: '1.21.5'
go-version: '1.22.4'
id: go
- name: Check out code into the Go module directory
@@ -6,7 +6,7 @@ run:
- proto
- tools/analyzers
timeout: 10m
go: '1.21.5'
go: '1.22.4'
linters:
enable-all: true
@@ -34,7 +34,6 @@ linters:
- dogsled
- dupl
- durationcheck
- errorlint
- exhaustive
- exhaustruct
- forbidigo
@@ -52,6 +51,7 @@ linters:
- gofumpt
- gomnd
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- ireturn
@@ -73,6 +73,7 @@ linters:
- promlinter
- protogetter
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign
WORKSPACE (12 changes)
@@ -182,7 +182,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.8",
go_version = "1.22.4",
nogo = "@//:nogo",
)
@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.2"
consensus_spec_version = "v1.5.0-alpha.5"
bls_test_version = "v0.1.1"
@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
integrity = "sha256-R9vG5HEL5eGMOAmbkKfJ2jfelNqL5V0xBUPiXOiGM6U=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
integrity = "sha256-AEIiEOlf1XuxoRMCsN+kgJMo4LrS05+biTA1p/7Ro00=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
integrity = "sha256-LH/Xr20yrJRYnbpjRGupMWTIOWt3cpxZJWXgThwVDsk=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
integrity = "sha256-mlytz4MPjKh0DwV7FMiAtnRbJw9B6o78/x66/vmnYc8=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -9,22 +9,20 @@ import (
"net/http"
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
type testRT struct {
@@ -55,6 +55,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -12,6 +12,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -1914,7 +1915,8 @@ func TestEmptyResponseBody(t *testing.T) {
var b []byte
r := &ExecutionPayloadResponse{}
err := json.Unmarshal(b, r)
_, ok := err.(*json.SyntaxError)
var syntaxError *json.SyntaxError
ok := errors.As(err, &syntaxError)
require.Equal(t, true, ok)
})
t.Run("empty object", func(t *testing.T) {
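The test change above swaps a direct type assertion for errors.As, which keeps working once the decode error gets wrapped somewhere along the way. A minimal, self-contained sketch of the difference (standalone illustration, not Prysm code; the fmt.Errorf wrapping is an assumed example):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {
	var v struct{}
	// Unmarshalling an empty body yields a *json.SyntaxError.
	err := json.Unmarshal([]byte(""), &v)
	// Callers often wrap errors before returning them.
	wrapped := fmt.Errorf("decoding response: %w", err)

	_, ok := wrapped.(*json.SyntaxError) // direct assertion only sees the wrapper: false
	var syntaxErr *json.SyntaxError
	found := errors.As(wrapped, &syntaxErr) // errors.As walks the chain: true

	fmt.Println(ok, found) // false true
}
```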
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["event_stream.go"],
srcs = [
"event_stream.go",
"utils.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
@@ -15,7 +18,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["event_stream_test.go"],
srcs = [
"event_stream_test.go",
"utils_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
@@ -102,6 +102,8 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
// Set the split function for the scanning operation
scanner.Split(scanLinesWithCarriage)
var eventType, data string // Variables to store event type and data
@@ -113,7 +115,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
close(eventsChannel)
return
default:
line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
line := scanner.Text()
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
@@ -43,8 +43,9 @@ func TestEventStream(t *testing.T) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
for i := 1; i <= 3; i++ {
events := [3]string{"event: head\ndata: data%d\n\n", "event: head\rdata: data%d\r\r", "event: head\r\ndata: data%d\r\n\r\n"}
_, err := fmt.Fprintf(w, events[i-1], i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
@@ -62,7 +63,7 @@ func TestEventStream(t *testing.T) {
// Collect events
var events []*Event
for len(events) != 2 {
for len(events) != 3 {
select {
case event := <-eventsChannel:
log.Info(event)
@@ -71,7 +72,7 @@ func TestEventStream(t *testing.T) {
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2"}
expectedData := []string{"data1", "data2", "data3"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
api/client/event/utils.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package event
import (
"bytes"
)
// adapted from ScanLines in scan.go to handle carriage return characters as separators
func scanLinesWithCarriage(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i, j := bytes.IndexByte(data, '\n'), bytes.IndexByte(data, '\r'); i >= 0 || j >= 0 {
in := i
// Select the first index of \n or \r or the second index of \r if it is followed by \n
if i < 0 || (i > j && i != j+1 && j >= 0) {
in = j
}
// We have a full newline-terminated line.
return in + 1, dropCR(data[0:in]), nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), dropCR(data), nil
}
// Request more data.
return 0, nil, nil
}
// dropCR drops a terminal \r from the data.
func dropCR(data []byte) []byte {
if len(data) > 0 && data[len(data)-1] == '\r' {
return data[0 : len(data)-1]
}
return data
}
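A minimal sketch of how this split function tokenizes a stream that mixes LF, CR, and CRLF terminators; it assumes it compiles inside the same event package so the unexported scanLinesWithCarriage is visible (the Example function itself is hypothetical, not part of the change):

```go
package event

import (
	"bufio"
	"fmt"
	"strings"
)

// ExampleScanLinesWithCarriage shows that "\n", "\r", and "\r\n" are each
// treated as a single line terminator, with the trailing "\r" dropped.
func ExampleScanLinesWithCarriage() {
	sc := bufio.NewScanner(strings.NewReader("line1\nline2\rline3\r\nline4"))
	sc.Split(scanLinesWithCarriage)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	// Output:
	// line1
	// line2
	// line3
	// line4
}
```

The CRLF case works because the index check keeps the position of the '\n' when the '\r' immediately precedes it, and dropCR then strips the leftover '\r' from the token.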
api/client/event/utils_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package event
import (
"bufio"
"bytes"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestScanLinesWithCarriage(t *testing.T) {
testCases := []struct {
name string
input string
expected []string
}{
{
name: "LF line endings",
input: "line1\nline2\nline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "CR line endings",
input: "line1\rline2\rline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "CRLF line endings",
input: "line1\r\nline2\r\nline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "Mixed line endings",
input: "line1\nline2\rline3\r\nline4",
expected: []string{"line1", "line2", "line3", "line4"},
},
{
name: "Empty lines",
input: "line1\n\nline2\r\rline3",
expected: []string{"line1", "", "line2", "", "line3"},
},
{
name: "Empty lines 2",
input: "line1\n\rline2\n\rline3",
expected: []string{"line1", "", "line2", "", "line3"},
},
{
name: "No line endings",
input: "single line without ending",
expected: []string{"single line without ending"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewReader([]byte(tc.input)))
scanner.Split(scanLinesWithCarriage)
var lines []string
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
require.NoError(t, scanner.Err())
require.Equal(t, len(tc.expected), len(lines), "Number of lines does not match")
for i, line := range lines {
require.Equal(t, tc.expected[i], line, "Line %d does not match", i)
}
})
}
}
// TestScanLinesWithCarriageEdgeCases tests edge cases and potential error scenarios
func TestScanLinesWithCarriageEdgeCases(t *testing.T) {
t.Run("Empty input", func(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewReader([]byte("")))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), false)
require.NoError(t, scanner.Err())
})
t.Run("Very long line", func(t *testing.T) {
longLine := bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize+1)
scanner := bufio.NewScanner(bytes.NewReader(longLine))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), false)
require.NotNil(t, scanner.Err())
})
t.Run("Line ending at max token size", func(t *testing.T) {
input := append(bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize-1), '\n')
scanner := bufio.NewScanner(bytes.NewReader(input))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), true)
require.Equal(t, string(bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize-1)), scanner.Text())
})
}
@@ -1,6 +1,7 @@
package server
import (
"errors"
"fmt"
"strings"
)
@@ -15,7 +16,8 @@ type DecodeError struct {
// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
// The current field that failed decoding must be passed in.
func NewDecodeError(err error, field string) *DecodeError {
de, ok := err.(*DecodeError)
var de *DecodeError
ok := errors.As(err, &de)
if ok {
return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
}
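With errors.As, NewDecodeError also recognizes a *DecodeError that sits deeper in an error chain rather than only one passed in directly, so the field path keeps accumulating correctly. A sketch of the wrapping pattern, assuming it lives in the same server package (the example functions and field names are illustrative, not part of the change):

```go
package server

import (
	"errors"
	"fmt"
)

// exampleWrap is a hypothetical decode path: the leaf failure is wrapped once
// per enclosing field, so the final error records the full field path.
func exampleWrap() error {
	leaf := errors.New("hex string of odd length")
	err := NewDecodeError(leaf, "Signature") // innermost field
	return NewDecodeError(err, "Message")    // enclosing field
}

// exampleDetect shows that callers can still recover the typed error even
// after further wrapping with %w.
func exampleDetect() bool {
	var de *DecodeError
	return errors.As(fmt.Errorf("request failed: %w", exampleWrap()), &de) // true
}
```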
@@ -317,6 +317,94 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockContentsElectra struct {
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockContentsElectra struct {
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockElectra struct {
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyElectra `json:"body"`
}
type BeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type BlindedBeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
type SignedBlindedBeaconBlockElectra struct {
Message *BlindedBeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
@@ -445,3 +533,49 @@ type ExecutionPayloadHeaderDeneb struct {
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequests []*DepositRequest `json:"deposit_requests"`
WithdrawalRequests []*WithdrawalRequest `json:"withdrawal_requests"`
ConsolidationRequests []*ConsolidationRequest `json:"consolidation_requests"`
}
type ExecutionPayloadHeaderElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequestsRoot string `json:"deposit_requests_root"`
WithdrawalRequestsRoot string `json:"withdrawal_requests_root"`
ConsolidationRequestsRoot string `json:"consolidation_requests_root"`
}
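The new Electra containers follow the same pattern as the earlier fork types in this file: string-encoded scalar fields, pointer-typed nested objects, and the signed wrappers exposing the raw message bytes and the signature string. A minimal sketch of using those two accessors, assumed to compile inside the same structs package (the values are hypothetical and most fields are left zero):

```go
package structs

import "fmt"

// exampleSignedBlockAccessors is an illustrative helper, not part of the change.
func exampleSignedBlockAccessors() error {
	sb := &SignedBeaconBlockElectra{
		Message:   &BeaconBlockElectra{Slot: "123", ProposerIndex: "7"},
		Signature: "0xabcdef",
	}
	raw, err := sb.MessageRawJson() // JSON bytes of just the message
	if err != nil {
		return err
	}
	fmt.Println(string(raw), sb.SigString()) // message JSON, then "0xabcdef"
	return nil
}
```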
@@ -8,11 +8,10 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/container/slice"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -341,6 +340,42 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
}, nil
}
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedAggregateAttestationAndProofElectra{
Message: msg,
Signature: sig,
}, nil
}
func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
return &eth.AggregateAttestationAndProofElectra{
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
Aggregate: agg,
SelectionProof: proof,
}, nil
}
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
@@ -370,6 +405,41 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
}
}
func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
committeeBits, err := hexutil.Decode(a.CommitteeBits)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeBits")
}
return &eth.AttestationElectra{
AggregationBits: aggBits,
Data: data,
Signature: sig,
CommitteeBits: committeeBits,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),
Data: AttDataFromConsensus(a.Data),
Signature: hexutil.Encode(a.Signature),
CommitteeBits: hexutil.Encode(a.CommitteeBits),
}
}
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
slot, err := strconv.ParseUint(a.Slot, 10, 64)
if err != nil {
@@ -624,6 +694,18 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
}
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
att1, err := s.Attestation1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation1")
}
att2, err := s.Attestation2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation2")
}
return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
}
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
@@ -649,6 +731,31 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
}, nil
}
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
indices[i], err = strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
}
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.IndexedAttestationElectra{
AttestingIndices: indices,
Data: data,
Signature: sig,
}, nil
}
func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
result := make([]*Withdrawal, len(ws))
for i, w := range ws {
@@ -666,6 +773,126 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
}
}
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
if src == nil {
return nil, errNilValue
@@ -931,6 +1158,138 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
}
}
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 2)
if err != nil {
return nil, err
}
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
for i, s := range src {
if s == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
}
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
}
a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
for j, ix := range s.Attestation1.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
}
a1AttestingIndices[j] = attestingIndex
}
a1Data, err := s.Attestation1.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
}
a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
}
a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
for j, ix := range s.Attestation2.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
}
a2AttestingIndices[j] = attestingIndex
}
a2Data, err := s.Attestation2.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
}
attesterSlashings[i] = &eth.AttesterSlashingElectra{
Attestation_1: &eth.IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: a1Data,
Signature: a1Sig,
},
Attestation_2: &eth.IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: a2Data,
Signature: a2Sig,
},
}
}
return attesterSlashings, nil
}
func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
attesterSlashings := make([]*AttesterSlashingElectra, len(src))
for i, s := range src {
attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
}
return attesterSlashings
}
func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
for j, ix := range src.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
for j, ix := range src.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
return &AttesterSlashingElectra{
Attestation1: &IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_1.Signature),
},
Attestation2: &IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_2.Signature),
},
}
}
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
if src == nil {
return nil, errNilValue
@@ -958,6 +1317,33 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
return atts
}
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 8)
if err != nil {
return nil, err
}
atts := make([]*eth.AttestationElectra, len(src))
for i, a := range src {
atts[i], err = a.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
}
}
return atts, nil
}
func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
atts := make([]*AttestationElectra, len(src))
for i, a := range src {
atts[i] = AttElectraFromConsensus(a)
}
return atts
}
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
if src == nil {
return nil, errNilValue
@@ -1088,3 +1474,37 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
}
}
func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
deposits := make([]*PendingBalanceDeposit, len(ds))
for i, d := range ds {
deposits[i] = &PendingBalanceDeposit{
Index: fmt.Sprintf("%d", d.Index),
Amount: fmt.Sprintf("%d", d.Amount),
}
}
return deposits
}
func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
withdrawals := make([]*PendingPartialWithdrawal, len(ws))
for i, w := range ws {
withdrawals[i] = &PendingPartialWithdrawal{
Index: fmt.Sprintf("%d", w.Index),
Amount: fmt.Sprintf("%d", w.Amount),
WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
}
}
return withdrawals
}
func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
consolidations := make([]*PendingConsolidation, len(cs))
for i, c := range cs {
consolidations[i] = &PendingConsolidation{
SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
}
}
return consolidations
}
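All of the new request converters above follow the same shape: hex-encoded byte fields go through bytesutil.DecodeHexWithLength with the expected length, numeric fields are decimal strings parsed with strconv, and any failure is wrapped in a DecodeError naming the offending field. A sketch of the DepositRequest round-trip, assumed to run inside the same structs package (all values are hypothetical placeholders):

```go
package structs

import "strings"

// exampleDepositRequestToConsensus is illustrative only. Lengths must match
// the expected sizes (48-byte pubkey, 32-byte credentials, 96-byte signature),
// otherwise ToConsensus returns a DecodeError naming the offending field.
func exampleDepositRequestToConsensus() error {
	d := &DepositRequest{
		Pubkey:                "0x" + strings.Repeat("aa", 48),
		WithdrawalCredentials: "0x" + strings.Repeat("bb", 32),
		Amount:                "32000000000",
		Signature:             "0x" + strings.Repeat("cc", 96),
		Index:                 "0",
	}
	req, err := d.ToConsensus()
	if err != nil {
		return err
	}
	_ = req // *enginev1.DepositRequest with decoded bytes and uint64 amount/index
	return nil
}
```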
(One file diff suppressed because it is too large.)
@@ -593,3 +593,185 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
HistoricalSummaries: hs,
}, nil
}
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingBalanceDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateElectra{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingBalanceDeposits: PendingBalanceDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}
@@ -196,3 +196,48 @@ type DepositSnapshot struct {
ExecutionBlockHash string `json:"execution_block_hash"`
ExecutionBlockHeight string `json:"execution_block_height"`
}
type GetIndividualVotesRequest struct {
Epoch string `json:"epoch"`
PublicKeys []string `json:"public_keys,omitempty"`
Indices []string `json:"indices,omitempty"`
}
type GetIndividualVotesResponse struct {
IndividualVotes []*IndividualVote `json:"individual_votes"`
}
type IndividualVote struct {
Epoch string `json:"epoch"`
PublicKey string `json:"public_keys,omitempty"`
ValidatorIndex string `json:"validator_index"`
IsSlashed bool `json:"is_slashed"`
IsWithdrawableInCurrentEpoch bool `json:"is_withdrawable_in_current_epoch"`
IsActiveInCurrentEpoch bool `json:"is_active_in_current_epoch"`
IsActiveInPreviousEpoch bool `json:"is_active_in_previous_epoch"`
IsCurrentEpochAttester bool `json:"is_current_epoch_attester"`
IsCurrentEpochTargetAttester bool `json:"is_current_epoch_target_attester"`
IsPreviousEpochAttester bool `json:"is_previous_epoch_attester"`
IsPreviousEpochTargetAttester bool `json:"is_previous_epoch_target_attester"`
IsPreviousEpochHeadAttester bool `json:"is_previous_epoch_head_attester"`
CurrentEpochEffectiveBalanceGwei string `json:"current_epoch_effective_balance_gwei"`
InclusionSlot string `json:"inclusion_slot"`
InclusionDistance string `json:"inclusion_distance"`
InactivityScore string `json:"inactivity_score"`
}
type ChainHead struct {
HeadSlot string `json:"head_slot"`
HeadEpoch string `json:"head_epoch"`
HeadBlockRoot string `json:"head_block_root"`
FinalizedSlot string `json:"finalized_slot"`
FinalizedEpoch string `json:"finalized_epoch"`
FinalizedBlockRoot string `json:"finalized_block_root"`
JustifiedSlot string `json:"justified_slot"`
JustifiedEpoch string `json:"justified_epoch"`
JustifiedBlockRoot string `json:"justified_block_root"`
PreviousJustifiedSlot string `json:"previous_justified_slot"`
PreviousJustifiedEpoch string `json:"previous_justified_epoch"`
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
OptimisticStatus bool `json:"optimistic_status"`
}
@@ -118,3 +118,34 @@ type GetValidatorPerformanceResponse struct {
MissingValidators [][]byte `json:"missing_validators,omitempty"`
InactivityScores []uint64 `json:"inactivity_scores,omitempty"`
}
type GetValidatorParticipationResponse struct {
Epoch string `json:"epoch"`
Finalized bool `json:"finalized"`
Participation *ValidatorParticipation `json:"participation"`
}
type ValidatorParticipation struct {
GlobalParticipationRate string `json:"global_participation_rate" deprecated:"true"`
VotedEther string `json:"voted_ether" deprecated:"true"`
EligibleEther string `json:"eligible_ether" deprecated:"true"`
CurrentEpochActiveGwei string `json:"current_epoch_active_gwei"`
CurrentEpochAttestingGwei string `json:"current_epoch_attesting_gwei"`
CurrentEpochTargetAttestingGwei string `json:"current_epoch_target_attesting_gwei"`
PreviousEpochActiveGwei string `json:"previous_epoch_active_gwei"`
PreviousEpochAttestingGwei string `json:"previous_epoch_attesting_gwei"`
PreviousEpochTargetAttestingGwei string `json:"previous_epoch_target_attesting_gwei"`
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`
ActivatedIndices []string `json:"activated_indices"`
ExitedPublicKeys []string `json:"exited_public_keys"`
ExitedIndices []string `json:"exited_indices"`
SlashedPublicKeys []string `json:"slashed_public_keys"`
SlashedIndices []string `json:"slashed_indices"`
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}
@@ -29,6 +29,13 @@ type Attestation struct {
Signature string `json:"signature"`
}
type AttestationElectra struct {
AggregationBits string `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
CommitteeBits string `json:"committee_bits"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`
@@ -78,6 +85,17 @@ type AggregateAttestationAndProof struct {
SelectionProof string `json:"selection_proof"`
}
type SignedAggregateAttestationAndProofElectra struct {
Message *AggregateAttestationAndProofElectra `json:"message"`
Signature string `json:"signature"`
}
type AggregateAttestationAndProofElectra struct {
AggregatorIndex string `json:"aggregator_index"`
Aggregate *AttestationElectra `json:"aggregate"`
SelectionProof string `json:"selection_proof"`
}
type SyncCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
@@ -178,6 +196,11 @@ type AttesterSlashing struct {
Attestation2 *IndexedAttestation `json:"attestation_2"`
}
type AttesterSlashingElectra struct {
Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
}
type Deposit struct {
Proof []string `json:"proof"`
Data *DepositData `json:"data"`
@@ -196,6 +219,12 @@ type IndexedAttestation struct {
Signature string `json:"signature"`
}
type IndexedAttestationElectra struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type SyncAggregate struct {
SyncCommitteeBits string `json:"sync_committee_bits"`
SyncCommitteeSignature string `json:"sync_committee_signature"`
@@ -207,3 +236,39 @@ type Withdrawal struct {
ExecutionAddress string `json:"address"`
Amount string `json:"amount"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
type PendingBalanceDeposit struct {
Index string `json:"index"`
Amount string `json:"amount"`
}
type PendingPartialWithdrawal struct {
Index string `json:"index"`
Amount string `json:"amount"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
@@ -140,3 +140,43 @@ type BeaconStateDeneb struct {
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
}
|
||||
|
||||
type BeaconStateElectra struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingBalanceDeposits []*PendingBalanceDeposit `json:"pending_balance_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
@@ -48,6 +49,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -159,6 +161,7 @@ go_test(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
@@ -20,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -49,6 +48,8 @@ type ForkchoiceFetcher interface {
|
||||
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
|
||||
NewSlot(context.Context, primitives.Slot) error
|
||||
ProposerBoost() [32]byte
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
|
||||
}
|
||||
|
||||
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
|
||||
|
||||
@@ -22,13 +22,6 @@ func (s *Service) GetProposerHead() [32]byte {
|
||||
return s.cfg.ForkChoiceStore.GetProposerHead()
|
||||
}
|
||||
|
||||
// ShouldOverrideFCU returns the corresponding value from forkchoice
|
||||
func (s *Service) ShouldOverrideFCU() bool {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
|
||||
}
|
||||
|
||||
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
|
||||
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
@@ -99,3 +92,10 @@ func (s *Service) FinalizedBlockHash() [32]byte {
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
|
||||
}
|
||||
|
||||
// ParentRoot wraps a call to the corresponding method in forkchoice
|
||||
func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.ParentRoot(root)
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.

@@ -74,8 +74,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	}
	payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
	if err != nil {
		switch err {
		case execution.ErrAcceptedSyncingPayloadStatus:
		switch {
		case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
			forkchoiceUpdatedOptimisticNodeCount.Inc()
			log.WithFields(logrus.Fields{
				"headSlot":                  headBlk.Slot(),
@@ -83,7 +83,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
				"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
			}).Info("Called fork choice updated with optimistic block")
			return payloadID, nil
		case execution.ErrInvalidPayloadStatus:
		case errors.Is(err, execution.ErrInvalidPayloadStatus):
			forkchoiceUpdatedInvalidNodeCount.Inc()
			headRoot := arg.headRoot
			if len(lastValidHash) == 0 {
@@ -139,7 +139,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
				"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
			}).Warn("Pruned invalid blocks")
			return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}

		default:
			log.WithError(err).Error(ErrUndefinedExecutionEngineError)
			return nil, nil
@@ -231,18 +230,18 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
	} else {
		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
	}
	switch err {
	case nil:
	switch {
	case err == nil:
		newPayloadValidNodeCount.Inc()
		return true, nil
	case execution.ErrAcceptedSyncingPayloadStatus:
	case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
		newPayloadOptimisticNodeCount.Inc()
		log.WithFields(logrus.Fields{
			"slot":             blk.Block().Slot(),
			"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
		}).Info("Called new payload with optimistic block")
		return false, nil
	case execution.ErrInvalidPayloadStatus:
	case errors.Is(err, execution.ErrInvalidPayloadStatus):
		lvh := bytesutil.ToBytes32(lastValidHash)
		return false, invalidBlock{
			error: ErrInvalidPayload,
@@ -324,7 +323,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

	var attr payloadattribute.Attributer
	switch st.Version() {
	case version.Deneb:
	case version.Deneb, version.Electra:
		withdrawals, _, err := st.ExpectedWithdrawals()
		if err != nil {
			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

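The hunks above replace direct equality switches on execution-engine errors with errors.Is checks. The motivation is that a sentinel error that gets wrapped anywhere along the way no longer compares equal, while errors.Is still matches it through the wrapping chain. A minimal, self-contained sketch of that difference (the sentinel here is a stand-in, not the real execution package value):

package main

import (
    "errors"
    "fmt"
)

// Stand-in for execution.ErrAcceptedSyncingPayloadStatus.
var ErrAcceptedSyncingPayloadStatus = errors.New("payload accepted, execution client syncing")

func main() {
    wrapped := fmt.Errorf("forkchoice updated: %w", ErrAcceptedSyncingPayloadStatus)

    switch wrapped {
    case ErrAcceptedSyncingPayloadStatus:
        fmt.Println("equality switch: matched") // never reached once the error is wrapped
    default:
        fmt.Println("equality switch: missed")
    }

    switch {
    case errors.Is(wrapped, ErrAcceptedSyncingPayloadStatus):
        fmt.Println("errors.Is: matched") // this branch runs
    }
}
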
@@ -855,41 +855,63 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
require.Equal(t, 0, len(a))
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttributeDeneb(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
func Test_GetPayloadAttributeV3(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
name string
|
||||
st bstate.BeaconState
|
||||
}{
|
||||
{
|
||||
name: "deneb",
|
||||
st: func() bstate.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
return st
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
st: func() bstate.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
return st
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
attrV3, err := attr.PbV3()
|
||||
require.NoError(t, err)
|
||||
hr := service.headRoot()
|
||||
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
attrV3, err := attr.PbV3()
|
||||
require.NoError(t, err)
|
||||
hr := service.headRoot()
|
||||
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kzg.go",
|
||||
"trusted_setup.go",
|
||||
"validation.go",
|
||||
],
|
||||
@@ -12,6 +13,9 @@ go_library(
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/blockchain/kzg/kzg.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package kzg

import (
	"errors"

	ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
	Cells  []Cell
	Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
	comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
	if err != nil {
		return Commitment{}, err
	}
	return Commitment(comm), nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
	proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
	if err != nil {
		return [48]byte{}, err
	}
	return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
	ckzgBlob := (*ckzg4844.Blob)(blob)
	ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
	if err != nil {
		return CellsAndProofs{}, err
	}

	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
	// Convert `Cell` type to `ckzg4844.Cell`
	ckzgCells := make([]ckzg4844.Cell, len(cells))
	for i := range cells {
		ckzgCells[i] = ckzg4844.Cell(cells[i])
	}

	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
	// Convert `Cell` type to `ckzg4844.Cell`
	ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
	for i := range partialCells {
		ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
	}

	ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
	if err != nil {
		return CellsAndProofs{}, err
	}

	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
	if len(ckzgCells) != len(ckzgProofs) {
		return CellsAndProofs{}, errors.New("different number of cells/proofs")
	}

	var cells []Cell
	var proofs []Proof
	for i := range ckzgCells {
		cells = append(cells, Cell(ckzgCells[i]))
		proofs = append(proofs, Proof(ckzgProofs[i]))
	}

	return CellsAndProofs{
		Cells:  cells,
		Proofs: proofs,
	}, nil
}

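A hedged usage sketch of this new package: compute a blob's commitment and per-cell proofs, then batch-verify one cell. It assumes the trusted setup has already been loaded through the package's Start() function, which the trusted_setup.go hunk below modifies; error handling is trimmed and the zero blob is only a smoke-test input.

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
    // Load the embedded trusted setup once before any KZG operation.
    if err := kzg.Start(); err != nil {
        panic(err)
    }

    var blob kzg.Blob // an all-zero blob is a valid input for a smoke test

    commitment, err := kzg.BlobToKZGCommitment(&blob)
    if err != nil {
        panic(err)
    }

    cp, err := kzg.ComputeCellsAndKZGProofs(&blob)
    if err != nil {
        panic(err)
    }

    // Batch-verify the first cell against the blob's commitment.
    ok, err := kzg.VerifyCellKZGProofBatch(
        []kzg.Bytes48{kzg.Bytes48(commitment)},
        []uint64{0},
        []kzg.Cell{cp.Cells[0]},
        []kzg.Bytes48{kzg.Bytes48(cp.Proofs[0])},
    )
    fmt.Println(ok, err)
}
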
@@ -5,6 +5,8 @@ import (
	"encoding/json"

	GoKZG "github.com/crate-crypto/go-kzg-4844"
	CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
)

@@ -12,17 +14,53 @@ var (
	//go:embed trusted_setup.json
	embeddedTrustedSetup []byte // 1.2Mb
	kzgContext           *GoKZG.Context
	kzgLoaded            bool
)

type TrustedSetup struct {
	G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
	G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
	G2Monomial [65]GoKZG.G2CompressedHexStr                   `json:"g2_monomial"`
}

func Start() error {
	parsedSetup := GoKZG.JSONTrustedSetup{}
	err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
	trustedSetup := &TrustedSetup{}
	err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
	if err != nil {
		return errors.Wrap(err, "could not parse trusted setup JSON")
	}
	kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
	kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
		SetupG2:         trustedSetup.G2Monomial[:],
		SetupG1Lagrange: trustedSetup.G1Lagrange})
	if err != nil {
		return errors.Wrap(err, "could not initialize go-kzg context")
	}

	// Length of a G1 point, converted from hex to binary.
	g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
	for i, g1 := range &trustedSetup.G1Monomial {
		copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
	}
	// Length of a G1 point, converted from hex to binary.
	g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
	for i, g1 := range &trustedSetup.G1Lagrange {
		copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
	}
	// Length of a G2 point, converted from hex to binary.
	g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
	for i, g2 := range &trustedSetup.G2Monomial {
		copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
	}
	if !kzgLoaded {
		// TODO: Provide a configuration option for this.
		var precompute uint = 0

		// Free the current trusted setup before running this method. CKZG
		// panics if the same setup is run multiple times.
		if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
			panic(err)
		}
	}
	kzgLoaded = true
	return nil
}

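The (len(hexStr)-2)/2 sizing in the hunk above strips the "0x" prefix and halves the remaining hex length to get the byte count. A short, hedged sanity check of that arithmetic, assuming compressed G1 points are 48 bytes and compressed G2 points are 96 bytes as in the consensus trusted setup:

package main

import (
    "fmt"
    "strings"
)

// decodedLen mirrors the sizing expression used above: drop the "0x" prefix,
// then two hex characters per byte.
func decodedLen(hexWithPrefix string) int {
    return (len(hexWithPrefix) - 2) / 2
}

func main() {
    g1 := "0x" + strings.Repeat("ab", 48) // compressed G1 point: 48 bytes, 96 hex chars
    g2 := "0x" + strings.Repeat("ab", 96) // compressed G2 point: 96 bytes, 192 hex chars
    fmt.Println(decodedLen(g1), decodedLen(g2)) // prints: 48 96
}
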
(File diff suppressed because it is too large.)
@@ -16,7 +16,7 @@ import (
)

const (
	finalityBranchNumOfLeaves = 6
	FinalityBranchNumOfLeaves = 6
)

// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
@@ -69,7 +69,7 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get sync aggregate %v", err)
|
||||
return nil, fmt.Errorf("could not get sync aggregate %w", err)
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
@@ -85,18 +85,18 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %v", err)
|
||||
return nil, fmt.Errorf("could not get state root %w", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %v", err)
|
||||
return nil, fmt.Errorf("could not get header root %w", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %v", err)
|
||||
return nil, fmt.Errorf("could not get block root %w", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
@@ -114,14 +114,14 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %v", err)
|
||||
return nil, fmt.Errorf("could not get attested state root %w", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %v", err)
|
||||
return nil, fmt.Errorf("could not get attested header root %w", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
@@ -175,13 +175,13 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %v", err)
|
||||
return nil, fmt.Errorf("could not get finalized header %w", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %v", err)
|
||||
return nil, fmt.Errorf("could not get finalized header root %w", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
@@ -204,7 +204,7 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
|
||||
return nil, fmt.Errorf("could not get finalized root proof %w", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
@@ -215,8 +215,8 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
|
||||
finalityBranch = make([][]byte, finalityBranchNumOfLeaves)
|
||||
for i := 0; i < finalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch = make([][]byte, FinalityBranchNumOfLeaves)
|
||||
for i := 0; i < FinalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
|
||||
require.Equal(t, finalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
|
||||
require.Equal(t, FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
|
||||
for _, leaf := range update.FinalityBranch {
|
||||
require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
|
||||
}
|
||||
|
||||
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Acceser) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := ethpb.CopyCheckpoint(a.GetData().Target)
|
||||
tgt := a.GetData().Target.Copy()
|
||||
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -143,7 +144,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
|
||||
return err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
|
||||
@@ -500,7 +501,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
}
|
||||
indices, err := bs.Indices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "indices")
|
||||
}
|
||||
missing := make(map[uint64]struct{}, len(expected))
|
||||
for i := range expected {
|
||||
@@ -514,12 +515,39 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
indices, err := bs.ColumnIndices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
missing := make(map[uint64]bool, len(expected))
|
||||
for col := range expected {
|
||||
if !indices[col] {
|
||||
missing[col] = true
|
||||
}
|
||||
}
|
||||
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if coreTime.PeerDASIsActive(signed.Block().Slot()) {
|
||||
return s.isDataColumnsAvailable(ctx, root, signed)
|
||||
}
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
@@ -549,7 +577,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -568,8 +596,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
|
||||
Error("Still waiting for DA check at slot end.")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
@@ -591,12 +624,130 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
}
|
||||
}
|
||||
|
||||
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": missing,
|
||||
func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
// Expected is the number of custody data columns a node is expected to have.
|
||||
expected := len(colMap)
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscribe to data columns newly stored in the database.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get the count of data columns we already have in the store.
|
||||
retrievedDataColumns, err := s.blobStorage.ColumnIndices(root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "column indices")
|
||||
}
|
||||
|
||||
retrievedDataColumnsCount := uint64(len(retrievedDataColumns))
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missing, err := missingDataColumns(s.blobStorage, root, colMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": len(missing),
|
||||
}).Error("Still waiting for data columns DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case rootIndex := <-rootIndexChan:
|
||||
if rootIndex.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a data column we are expecting.
|
||||
if _, ok := missing[rootIndex.Index]; ok {
|
||||
retrievedDataColumnsCount++
|
||||
}
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missing, rootIndex.Index)
|
||||
|
||||
// Exit if there are no more missing data columns.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
missingIndexes := make([]uint64, 0, len(missing))
|
||||
for val := range missing {
|
||||
copiedVal := val
|
||||
missingIndexes = append(missingIndexes, copiedVal)
|
||||
}
|
||||
return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
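The availability loop above stops waiting as soon as peerdas.CanSelfReconstruct reports that enough columns are in custody, because the remaining columns of the erasure-coded extension can then be recovered locally. A hedged sketch of that early-exit idea follows; the helper name comes from the diff, but the threshold expression and the 128-column network parameter are illustrative assumptions, not the peerdas implementation.

package main

import "fmt"

// numberOfColumns is an assumed network parameter (NUMBER_OF_COLUMNS).
const numberOfColumns = 128

// canSelfReconstruct sketches the rule described in the comments above: once a node
// holds more than half of the extended data columns, the rest are recoverable, so the
// data-availability wait can end early instead of waiting for every custody column.
func canSelfReconstruct(retrieved uint64) bool {
    return retrieved > numberOfColumns/2
}

func main() {
    fmt.Println(canSelfReconstruct(64)) // false: exactly half is not treated as enough here
    fmt.Println(canSelfReconstruct(65)) // true: reconstruction of the missing columns is possible
}
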
@@ -679,7 +830,7 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
}
|
||||
|
||||
@@ -7,9 +7,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
@@ -17,6 +14,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -25,6 +23,8 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
@@ -286,7 +286,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
|
||||
defer span.End()
|
||||
|
||||
// Verify incoming block has a valid pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -312,11 +312,10 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
|
||||
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, parentRoot [field_params.RootLength]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
|
||||
defer span.End()
|
||||
|
||||
parentRoot := b.ParentRoot()
|
||||
// Loosen the check to HasBlock because state summary gets saved in batches
|
||||
// during initial syncing. There's no risk given a state summary object is just a
|
||||
// subset of the block object.
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: 1, Root: root[:]}))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
|
||||
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
|
||||
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block().ParentRoot()))
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
@@ -2044,7 +2044,11 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
defaultConfig := util.DefaultBlockGenConfig()
|
||||
defaultConfig.NumWithdrawalRequests = 1
|
||||
defaultConfig.NumDepositRequests = 2
|
||||
defaultConfig.NumConsolidationRequests = 1
|
||||
b, err := util.GenerateFullBlockElectra(st, keys, defaultConfig, 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -2059,7 +2063,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
b, err = util.GenerateFullBlockElectra(st, keys, defaultConfig, 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -2067,7 +2071,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
b3, err := util.GenerateFullBlockElectra(st3, keys, defaultConfig, 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -190,13 +191,26 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
}
|
||||
|
||||
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregationCount": a.GetAggregationBits().Count(),
|
||||
}).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
var fields logrus.Fields
|
||||
if a.Version() >= version.Electra {
|
||||
fields = logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeCount": a.CommitteeBitsVal().Count(),
|
||||
"committeeIndices": a.CommitteeBitsVal().BitIndices(),
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregatedCount": a.GetAggregationBits().Count(),
|
||||
}
|
||||
} else {
|
||||
fields = logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregatedCount": a.GetAggregationBits().Count(),
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +51,12 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of chain service for receiving new
|
||||
// data columns
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -77,59 +83,20 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
}
|
||||
// Save current justified and finalized epochs for future use.
|
||||
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
|
||||
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
|
||||
currentEpoch := coreTime.CurrentEpoch(preState)
|
||||
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
currentCheckpoints := s.saveCurrentCheckpoints(preState)
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
var postState state.BeaconState
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to validate consensus state transition function")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := eg.Wait(); err != nil {
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daStartTime := time.Now()
|
||||
if avs != nil {
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
|
||||
}
|
||||
} else {
|
||||
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
|
||||
@@ -151,29 +118,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if coreTime.CurrentEpoch(postState) > currentEpoch && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
|
||||
headSt, err := s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||
log.WithError(err).Error("could not report epoch metrics")
|
||||
}
|
||||
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
|
||||
return errors.Wrap(err, "could not update justified checkpoint")
|
||||
}
|
||||
|
||||
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not update finalized checkpoint")
|
||||
}
|
||||
// Send finalized events and finalized deposits in the background
|
||||
if newFinalized {
|
||||
// hook to process all post state finalization tasks
|
||||
s.executePostFinalizationTasks(ctx, postState)
|
||||
}
|
||||
|
||||
// If slasher is configured, forward the attestations in the block via an event feed for processing.
|
||||
if features.Get().EnableSlasher {
|
||||
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
|
||||
@@ -193,31 +140,140 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err := s.handleCaches(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
type ffgCheckpoints struct {
|
||||
j, f, c primitives.Epoch
|
||||
}
|
||||
|
||||
func (s *Service) saveCurrentCheckpoints(state state.BeaconState) (cp ffgCheckpoints) {
|
||||
// Save current justified and finalized epochs for future use.
|
||||
cp.j = s.CurrentJustifiedCheckpt().Epoch
|
||||
cp.f = s.FinalizedCheckpt().Epoch
|
||||
cp.c = coreTime.CurrentEpoch(state)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Service) updateCheckpoints(
|
||||
ctx context.Context,
|
||||
cp ffgCheckpoints,
|
||||
preState, postState state.BeaconState,
|
||||
blockRoot [32]byte,
|
||||
) error {
|
||||
if coreTime.CurrentEpoch(postState) > cp.c && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
|
||||
headSt, err := s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||
log.WithError(err).Error("could not report epoch metrics")
|
||||
}
|
||||
}
|
||||
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
|
||||
return errors.Wrap(err, "could not update justified checkpoint")
|
||||
}
|
||||
|
||||
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, cp.f)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not update finalized checkpoint")
|
||||
}
|
||||
// Send finalized events and finalized deposits in the background
|
||||
if newFinalized {
|
||||
// hook to process all post state finalization tasks
|
||||
s.executePostFinalizationTasks(ctx, postState)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) validateExecutionAndConsensus(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
) (state.BeaconState, bool, error) {
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
var postState state.BeaconState
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
postState, err = s.validateStateTransition(ctx, preState, block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to validate consensus state transition function")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return postState, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) handleDA(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
avs das.AvailabilityStore,
|
||||
) (time.Duration, error) {
|
||||
daStartTime := time.Now()
|
||||
if avs != nil {
|
||||
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
|
||||
}
|
||||
} else {
|
||||
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
|
||||
return daWaitedTime, nil
|
||||
}
|
||||
|
||||
func (s *Service) reportPostBlockProcessing(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
receivedTime time.Time,
|
||||
daWaitedTime time.Duration,
|
||||
) {
|
||||
// Reports on block and fork choice metrics.
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
finalized := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
|
||||
// Log block sync status.
|
||||
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
justified := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
}
|
||||
// Log payload data
|
||||
if err := logPayload(blockCopy.Block()); err != nil {
|
||||
if err := logPayload(block.Block()); err != nil {
|
||||
log.WithError(err).Error("Unable to log debug block payload data")
|
||||
}
|
||||
// Log state transition data.
|
||||
if err := logStateTransitionData(blockCopy.Block()); err != nil {
|
||||
if err := logStateTransitionData(block.Block()); err != nil {
|
||||
log.WithError(err).Error("Unable to log state transition data")
|
||||
}
|
||||
|
||||
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
|
||||
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -421,14 +420,9 @@ func Test_sendNewFinalizedEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EIP6110ValidatorIndexCache: true,
|
||||
})
|
||||
defer resetFn()
|
||||
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
headState, err := util.NewBeaconState()
|
||||
headState, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
14
beacon-chain/blockchain/receive_data_column.go
Normal file
@@ -0,0 +1,14 @@
package blockchain

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
		return errors.Wrap(err, "save data column")
	}

	return nil
}
|
||||
@@ -11,8 +11,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
@@ -44,6 +42,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
@@ -82,7 +81,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Acceser
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -107,15 +106,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}
|
||||
// TODO: Separate Data Columns from blobs
|
||||
/*
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}*/
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
@@ -129,7 +130,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -143,7 +144,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
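The hunk above widens blobNotifierMap so each root tracks up to fieldparams.NumberOfColumns indices instead of MaxBlobsPerBlock, with a matching channel buffer. A compact sketch of that de-duplicating notifier pattern, using an illustrative constant in place of the real field parameter:

package main

import (
	"fmt"
	"sync"
)

const maxIndicesPerRoot = 128 // illustrative stand-in for fieldparams.NumberOfColumns

// indexNotifier de-duplicates index notifications per root and fans them out
// over a buffered channel sized so that senders never block.
type indexNotifier struct {
	sync.Mutex
	notifiers map[[32]byte]chan uint64
	seen      map[[32]byte][maxIndicesPerRoot]bool
}

func newIndexNotifier() *indexNotifier {
	return &indexNotifier{
		notifiers: make(map[[32]byte]chan uint64),
		seen:      make(map[[32]byte][maxIndicesPerRoot]bool),
	}
}

func (n *indexNotifier) notify(root [32]byte, idx uint64) {
	if idx >= maxIndicesPerRoot {
		return
	}
	n.Lock()
	defer n.Unlock()
	seen := n.seen[root]
	if seen[idx] {
		return // already notified for this (root, index) pair
	}
	seen[idx] = true
	n.seen[root] = seen
	c, ok := n.notifiers[root]
	if !ok {
		c = make(chan uint64, maxIndicesPerRoot)
		n.notifiers[root] = c
	}
	c <- idx
}

func main() {
	n := newIndexNotifier()
	root := [32]byte{1}
	n.notify(root, 3)
	n.notify(root, 3)                   // dropped as a duplicate
	fmt.Println(len(n.notifiers[root])) // 1
}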
@@ -169,7 +170,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
@@ -506,7 +507,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
}
|
||||
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
|
||||
return fmt.Errorf("could not load genesis block: %v", err)
|
||||
return fmt.Errorf("could not load genesis block: %w", err)
|
||||
}
|
||||
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
|
||||
@@ -97,7 +97,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
@@ -503,16 +503,15 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
|
||||
|
||||
func TestStartFromSavedState_ValidatorIndexCacheUpdated(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EnableStartOptimistic: true,
|
||||
EIP6110ValidatorIndexCache: true,
|
||||
EnableStartOptimistic: true,
|
||||
})
|
||||
defer resetFn()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
genesis := util.NewBeaconBlockElectra()
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
headBlock := util.NewBeaconBlockElectra()
|
||||
headBlock.Block.Slot = finalizedSlot
|
||||
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
|
||||
headState, err := util.NewBeaconState()
|
||||
@@ -580,7 +579,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -45,6 +46,11 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
@@ -65,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
|
||||
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
1
beacon-chain/cache/BUILD.bazel
vendored
@@ -8,6 +8,7 @@ go_library(
|
||||
"attestation_data.go",
|
||||
"balance_cache_key.go",
|
||||
"checkpoint_state.go",
|
||||
"column_subnet_ids.go",
|
||||
"committee.go",
|
||||
"committee_disabled.go", # keep
|
||||
"committees.go",
|
||||
|
||||
70
beacon-chain/cache/column_subnet_ids.go
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
type columnSubnetIDs struct {
|
||||
colSubCache *cache.Cache
|
||||
colSubLock sync.RWMutex
|
||||
}
|
||||
|
||||
// ColumnSubnetIDs for column subnet participants
|
||||
var ColumnSubnetIDs = newColumnSubnetIDs()
|
||||
|
||||
const columnKey = "columns"
|
||||
|
||||
func newColumnSubnetIDs() *columnSubnetIDs {
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
epochDuration := time.Duration(slotsPerEpoch.Mul(secondsPerSlot))
|
||||
|
||||
// Set the default duration of a column subnet subscription as the column expiry period.
|
||||
minEpochsForDataColumnSidecarsRequest := time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
subLength := epochDuration * minEpochsForDataColumnSidecarsRequest
|
||||
|
||||
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
|
||||
return &columnSubnetIDs{colSubCache: persistentCache}
|
||||
}
|
||||
|
||||
// GetColumnSubnets retrieves the data column subnets.
|
||||
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
|
||||
s.colSubLock.RLock()
|
||||
defer s.colSubLock.RUnlock()
|
||||
|
||||
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
// Retrieve indices from the cache.
|
||||
idxs, ok := id.([]uint64)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
|
||||
return idxs, ok, duration
|
||||
}
|
||||
|
||||
// AddColumnSubnets adds the relevant data column subnets.
|
||||
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Set(columnKey, colIdx, 0)
|
||||
}
|
||||
|
||||
// EmptyAllCaches empties out all the related caches and flushes any stored
|
||||
// entries on them. This should only ever be used for testing, in normal
|
||||
// production, handling of the relevant subnets for each role is done
|
||||
// separately.
|
||||
func (s *columnSubnetIDs) EmptyAllCaches() {
|
||||
// Clear the cache.
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Flush()
|
||||
}
|
||||
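The new cache above keeps one slice of column subnet IDs under a single key, with a TTL derived from the column retention window. A minimal usage sketch of the same go-cache pattern; the durations and IDs here are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Default TTL and cleanup interval are placeholders, not Prysm's derived values.
	c := cache.New(10*time.Minute, time.Minute)

	// Store the subscribed column subnet IDs under one well-known key.
	c.Set("columns", []uint64{3, 17, 42}, 0) // 0 means "use the default TTL"

	v, expiry, ok := c.GetWithExpiration("columns")
	if !ok {
		fmt.Println("no subnets cached")
		return
	}
	ids, ok := v.([]uint64)
	if !ok {
		fmt.Println("unexpected cache entry type")
		return
	}
	fmt.Println(ids, "expire at", expiry.Format(time.RFC3339))
}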
@@ -79,6 +79,7 @@ go_test(
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -101,7 +102,8 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom
|
||||
// candidate_index = active_validator_indices[shuffled_index]
|
||||
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
// effective_balance = state.validators[candidate_index].effective_balance
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
||||
// # [Modified in Electra:EIP7251]
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
|
||||
// sync_committee_indices.append(candidate_index)
|
||||
// i += 1
|
||||
// return sync_committee_indices
|
||||
@@ -121,6 +123,11 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
|
||||
cIndices := make([]primitives.ValidatorIndex, 0, syncCommitteeSize)
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
|
||||
maxEB := cfg.MaxEffectiveBalanceElectra
|
||||
if s.Version() < version.Electra {
|
||||
maxEB = cfg.MaxEffectiveBalance
|
||||
}
|
||||
|
||||
for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
@@ -140,7 +147,7 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
|
||||
}
|
||||
|
||||
effectiveBal := v.EffectiveBalance()
|
||||
if effectiveBal*maxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
|
||||
if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
|
||||
cIndices = append(cIndices, cIndex)
|
||||
}
|
||||
}
|
||||
|
||||
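The change above switches the sync committee sampling threshold to the Electra effective-balance cap once the state is post-Electra. A hedged sketch of that fork-gated condition; the constants are the usual spec values (32 ETH and 2048 ETH, in Gwei) and the version check is reduced to a boolean:

package main

import "fmt"

const (
	maxEffectiveBalance        = 32_000_000_000    // Gwei, pre-Electra cap
	maxEffectiveBalanceElectra = 2_048_000_000_000 // Gwei, EIP-7251 cap
	maxRandomByte              = 255
)

// accepted mirrors the spec condition
// effective_balance * MAX_RANDOM_BYTE >= max_eb * random_byte.
func accepted(effectiveBalance uint64, randomByte byte, postElectra bool) bool {
	maxEB := uint64(maxEffectiveBalance)
	if postElectra {
		maxEB = maxEffectiveBalanceElectra
	}
	return effectiveBalance*maxRandomByte >= maxEB*uint64(randomByte)
}

func main() {
	// A 32 ETH validator is sampled less often under the Electra cap.
	fmt.Println(accepted(32_000_000_000, 200, false)) // true
	fmt.Println(accepted(32_000_000_000, 200, true))  // false
}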
@@ -13,13 +13,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
)
|
||||
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &ethpb.Validator{
|
||||
@@ -27,17 +28,28 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
var st state.BeaconState
|
||||
var err error
|
||||
switch vers {
|
||||
case version.Altair:
|
||||
st, err = state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
case version.Electra:
|
||||
st, err = state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
default:
|
||||
t.Fatal("Unknown version")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
return st
|
||||
}
|
||||
|
||||
type args struct {
|
||||
state state.BeaconState
|
||||
epoch primitives.Epoch
|
||||
validatorCount uint64
|
||||
epoch primitives.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -48,32 +60,32 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
{
|
||||
name: "genesis validator count, epoch 0",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 0,
|
||||
validatorCount: params.BeaconConfig().MinGenesisActiveValidatorCount,
|
||||
epoch: 0,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "genesis validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 100,
|
||||
validatorCount: params.BeaconConfig().MinGenesisActiveValidatorCount,
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "less than optimal validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MaxValidatorsPerCommittee),
|
||||
epoch: 100,
|
||||
validatorCount: params.BeaconConfig().MaxValidatorsPerCommittee,
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no active validators, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, 0), // Regression test for divide by zero. Issue #13051.
|
||||
epoch: 100,
|
||||
validatorCount: 0, // Regression test for divide by zero. Issue #13051.
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "no active validator indices",
|
||||
@@ -81,13 +93,18 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
got, err := altair.NextSyncCommitteeIndices(context.Background(), tt.args.state)
|
||||
if tt.wantErr {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(params.BeaconConfig().SyncCommitteeSize), len(got))
|
||||
for _, v := range []int{version.Altair, version.Electra} {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
st := getState(t, tt.args.validatorCount, v)
|
||||
got, err := altair.NextSyncCommitteeIndices(context.Background(), st)
|
||||
if tt.wantErr {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(params.BeaconConfig().SyncCommitteeSize), len(got))
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -110,25 +110,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
if att.Version() < version.Electra {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if att.Version() >= version.Electra {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
@@ -154,6 +136,29 @@ func VerifyAttestationNoVerifySignature(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if committee == nil {
|
||||
return errors.New("no committee exist for this attestation")
|
||||
}
|
||||
|
||||
if err := helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
return errors.Wrap(err, "failed to verify aggregation bitfield")
|
||||
}
|
||||
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
|
||||
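The restructured attestation check above applies the committee-index rule on either side of the Electra fork: before Electra the index must address an existing committee, from Electra onward it must be zero because committees are carried in the committee bits instead. A condensed sketch of just that rule:

package main

import (
	"errors"
	"fmt"
)

// checkCommitteeIndex applies the fork-dependent rule on the attestation's committee index.
func checkCommitteeIndex(index, committeeCount uint64, postElectra bool) error {
	if postElectra {
		if index != 0 {
			return errors.New("committee index must be 0 post-Electra")
		}
		return nil
	}
	if index >= committeeCount {
		return fmt.Errorf("committee index %d >= committee count %d", index, committeeCount)
	}
	return nil
}

func main() {
	fmt.Println(checkCommitteeIndex(3, 2, false)) // pre-Electra: index out of range
	fmt.Println(checkCommitteeIndex(1, 2, true))  // post-Electra: must be 0
	fmt.Println(checkCommitteeIndex(0, 2, true))  // <nil>
}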
@@ -58,10 +58,9 @@ func AreEth1DataEqual(a, b *ethpb.Eth1Data) bool {
|
||||
// votes to see if they match the eth1data.
|
||||
func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb.Eth1Data) (bool, error) {
|
||||
voteCount := uint64(0)
|
||||
data = ethpb.CopyETH1Data(data)
|
||||
|
||||
for _, vote := range beaconState.Eth1DataVotes() {
|
||||
if AreEth1DataEqual(vote, data) {
|
||||
if AreEth1DataEqual(vote, data.Copy()) {
|
||||
voteCount++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,7 +186,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
if len(newETH1DataVotes) <= 1 {
|
||||
t.Error("Expected new ETH1 data votes to have length > 1")
|
||||
}
|
||||
if !proto.Equal(beaconState.Eth1Data(), ethpb.CopyETH1Data(b.Block.Body.Eth1Data)) {
|
||||
if !proto.Equal(beaconState.Eth1Data(), b.Block.Body.Eth1Data.Copy()) {
|
||||
t.Errorf(
|
||||
"Expected latest eth1 data to have been set to %v, received %v",
|
||||
b.Block.Body.Eth1Data,
|
||||
|
||||
@@ -98,6 +98,8 @@ func ProcessVoluntaryExits(
|
||||
// assert get_current_epoch(state) >= voluntary_exit.epoch
|
||||
// # Verify the validator has been active long enough
|
||||
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
|
||||
// # Only exit validator if it has no pending withdrawals in the queue
|
||||
// assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0 # [New in Electra:EIP7251]
|
||||
// # Verify signature
|
||||
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
|
||||
// signing_root = compute_signing_root(voluntary_exit, domain)
|
||||
@@ -113,7 +115,6 @@ func VerifyExitAndSignature(
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
|
||||
currentSlot := state.Slot()
|
||||
fork := state.Fork()
|
||||
genesisRoot := state.GenesisValidatorsRoot()
|
||||
|
||||
@@ -128,7 +129,7 @@ func VerifyExitAndSignature(
|
||||
}
|
||||
|
||||
exit := signed.Exit
|
||||
if err := verifyExitConditions(validator, currentSlot, exit); err != nil {
|
||||
if err := verifyExitConditions(state, validator, exit); err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
|
||||
@@ -157,14 +158,16 @@ func VerifyExitAndSignature(
|
||||
// assert get_current_epoch(state) >= voluntary_exit.epoch
|
||||
// # Verify the validator has been active long enough
|
||||
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
|
||||
// # Only exit validator if it has no pending withdrawals in the queue
|
||||
// assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0 # [New in Electra:EIP7251]
|
||||
// # Verify signature
|
||||
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
|
||||
// signing_root = compute_signing_root(voluntary_exit, domain)
|
||||
// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
|
||||
// # Initiate exit
|
||||
// initiate_validator_exit(state, voluntary_exit.validator_index)
|
||||
func verifyExitConditions(validator state.ReadOnlyValidator, currentSlot primitives.Slot, exit *ethpb.VoluntaryExit) error {
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnlyValidator, exit *ethpb.VoluntaryExit) error {
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
|
||||
return errors.New("non-active validator cannot exit")
|
||||
@@ -187,5 +190,17 @@ func verifyExitConditions(validator state.ReadOnlyValidator, currentSlot primiti
|
||||
validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod,
|
||||
)
|
||||
}
|
||||
|
||||
if st.Version() >= version.Electra {
|
||||
// Only exit validator if it has no pending withdrawals in the queue.
|
||||
ok, err := st.HasPendingBalanceToWithdraw(exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to retrieve pending balance to withdraw for validator %d: %w", exit.ValidatorIndex, err)
|
||||
}
|
||||
if ok {
|
||||
return fmt.Errorf("validator %d must have no pending balance to withdraw", exit.ValidatorIndex)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
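Post-Electra, verifyExitConditions above additionally rejects an exit while the validator still has a pending partial withdrawal queued. A minimal sketch of that extra gate; the interface and its method names are placeholders for the real state accessors:

package main

import (
	"errors"
	"fmt"
)

// exitState abstracts the two reads the Electra gate needs; both method
// names are illustrative, not Prysm's interfaces.
type exitState interface {
	PostElectra() bool
	HasPendingBalanceToWithdraw(validatorIndex uint64) (bool, error)
}

func checkElectraExitGate(st exitState, validatorIndex uint64) error {
	if !st.PostElectra() {
		return nil // the gate only applies from Electra onward
	}
	pending, err := st.HasPendingBalanceToWithdraw(validatorIndex)
	if err != nil {
		return fmt.Errorf("pending balance lookup for validator %d: %w", validatorIndex, err)
	}
	if pending {
		return errors.New("validator must have no pending balance to withdraw")
	}
	return nil
}

type fakeState struct{ pending bool }

func (f fakeState) PostElectra() bool                                { return true }
func (f fakeState) HasPendingBalanceToWithdraw(uint64) (bool, error) { return f.pending, nil }

func main() {
	fmt.Println(checkElectraExitGate(fakeState{pending: true}, 0))  // rejected
	fmt.Println(checkElectraExitGate(fakeState{pending: false}, 0)) // <nil>
}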
@@ -135,6 +135,11 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
// Remove after electra fork epoch is defined.
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ElectraForkEpoch = cfg.DenebForkEpoch * 2
|
||||
params.SetActiveTestCleanup(t, cfg)
|
||||
// End remove section.
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
@@ -167,7 +172,7 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
wantErr: "nil exit",
|
||||
},
|
||||
{
|
||||
name: "Happy Path",
|
||||
name: "Happy Path phase0",
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := &ethpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
@@ -275,6 +280,52 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
return validator, signedExit, bs, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "EIP-7251 - pending balance to withdraw must be zero",
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := &ethpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().DenebForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
Epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
}
|
||||
signedExit := &ethpb.SignedVoluntaryExit{
|
||||
Exit: &ethpb.VoluntaryExit{
|
||||
Epoch: params.BeaconConfig().DenebForkEpoch + 1,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
}
|
||||
electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
|
||||
require.NoError(t, err)
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
bs, err = state_native.InitializeFromProtoUnsafeElectra(&ethpb.BeaconStateElectra{
|
||||
GenesisValidatorsRoot: bs.GenesisValidatorsRoot(),
|
||||
Fork: fork,
|
||||
Slot: electraSlot,
|
||||
Validators: bs.Validators(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
validator := bs.Validators()[0]
|
||||
validator.ActivationEpoch = 1
|
||||
err = bs.UpdateValidatorAtIndex(0, validator)
|
||||
require.NoError(t, err)
|
||||
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
|
||||
// Give validator a pending balance to withdraw.
|
||||
require.NoError(t, bs.AppendPendingPartialWithdrawal(&ethpb.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 500,
|
||||
}))
|
||||
|
||||
return validator, signedExit, bs, nil
|
||||
},
|
||||
wantErr: "validator 0 must have no pending balance to withdraw",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
package blocks
|
||||
|
||||
var ProcessBLSToExecutionChange = processBLSToExecutionChange
|
||||
|
||||
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
|
||||
|
||||
@@ -2,11 +2,13 @@ package blocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -200,13 +202,13 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
var err error
|
||||
if st.Version() >= version.Capella {
|
||||
st, err = ProcessWithdrawals(st, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
|
||||
payload, err := body.Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := verifyBlobCommitmentCount(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
@@ -220,70 +222,20 @@ func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (sta
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !complete {
|
||||
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
if body.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// Validate current header's parent hash matches state header's block hash.
|
||||
h, err := st.LatestExecutionPayloadHeader()
|
||||
kzgs, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
if len(kzgs) > field_params.MaxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
// Validate header's timestamp matches with state in current slot.
|
||||
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
var err error
|
||||
if st.Version() >= version.Capella {
|
||||
st, err = ProcessWithdrawals(st, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
}
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
|
||||
var payloadHash [32]byte
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -581,14 +583,18 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
|
||||
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: tt.payload,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, wrappedPayload)
|
||||
st, err := blocks.ProcessPayload(st, body)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
want, err := consensusblocks.PayloadToHeader(wrappedPayload)
|
||||
payload, err := body.Execution()
|
||||
require.NoError(t, err)
|
||||
want, err := consensusblocks.PayloadToHeader(payload)
|
||||
require.Equal(t, tt.err, err)
|
||||
h, err := st.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -609,13 +615,15 @@ func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
payload.PrevRandao = random
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
|
||||
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyCapella{
|
||||
ExecutionPayload: payload,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, wrapped)
|
||||
_, err = blocks.ProcessPayload(st, body)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
func Test_ProcessPayload_Blinded(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
@@ -663,7 +671,13 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, err := blocks.ProcessPayloadHeader(st, tt.header)
|
||||
p, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
|
||||
require.Equal(t, true, ok)
|
||||
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BlindedBeaconBlockBodyBellatrix{
|
||||
ExecutionPayloadHeader: p,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, body)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -728,7 +742,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err = blocks.ValidatePayloadHeader(st, tt.header)
|
||||
err = blocks.ValidatePayload(st, tt.header)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -785,7 +799,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
|
||||
err = blocks.ValidatePayloadWhenMergeCompletes(tt.state, tt.header)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -906,3 +920,15 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBlobCommitmentCount(t *testing.T) {
	b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
	rb, err := consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))

	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
	rb, err = consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}
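The verifyBlobCommitmentCount helper exercised above is a plain length guard on the block body's KZG commitments. A standalone sketch of the same guard, with an illustrative limit in place of fieldparams.MaxBlobsPerBlock:

package main

import "fmt"

const maxBlobsPerBlock = 6 // illustrative stand-in for fieldparams.MaxBlobsPerBlock

// verifyCommitmentCount rejects bodies carrying more KZG commitments than the
// per-block blob limit allows.
func verifyCommitmentCount(commitments [][]byte) error {
	if len(commitments) > maxBlobsPerBlock {
		return fmt.Errorf("too many kzg commitments in block: %d", len(commitments))
	}
	return nil
}

func main() {
	fmt.Println(verifyCommitmentCount(make([][]byte, maxBlobsPerBlock)))   // <nil>
	fmt.Println(verifyCommitmentCount(make([][]byte, maxBlobsPerBlock+1))) // error
}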
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
	currentEpoch := slots.ToEpoch(header.Header.Slot)
	fork, err := forks.Fork(currentEpoch)
	if err != nil {
		return err
	}
	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
	if err != nil {
		return err
	}
	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
	if err != nil {
		return err
	}
	proposerPubKey := proposer.PublicKey
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
@@ -204,12 +222,7 @@ func createAttestationSignatureBatch(
|
||||
return nil, err
|
||||
}
|
||||
indices := ia.GetAttestingIndices()
|
||||
pubkeys := make([][]byte, len(indices))
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
pubkeys[i] = pubkeyAtIdx[:]
|
||||
}
|
||||
aggP, err := bls.AggregatePublicKeys(pubkeys)
|
||||
aggP, err := beaconState.AggregateKeyFromIndices(indices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -120,32 +120,35 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
|
||||
//
|
||||
// expected_withdrawals = get_expected_withdrawals(state)
|
||||
// assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
// assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
//
|
||||
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
|
||||
// assert withdrawal == expected_withdrawal
|
||||
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
|
||||
// assert withdrawal == expected_withdrawal
|
||||
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
//
|
||||
// # Update the next withdrawal index if this block contained withdrawals
|
||||
// if len(expected_withdrawals) != 0:
|
||||
// latest_withdrawal = expected_withdrawals[-1]
|
||||
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
|
||||
// # Update pending partial withdrawals [New in Electra:EIP7251]
|
||||
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
|
||||
//
|
||||
// # Update the next validator index to start the next withdrawal sweep
|
||||
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
// # Next sweep starts after the latest withdrawal's validator index
|
||||
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
// else:
|
||||
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
// # Update the next withdrawal index if this block contained withdrawals
|
||||
// if len(expected_withdrawals) != 0:
|
||||
// latest_withdrawal = expected_withdrawals[-1]
|
||||
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
|
||||
//
|
||||
// # Update the next validator index to start the next withdrawal sweep
|
||||
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
// # Next sweep starts after the latest withdrawal's validator index
|
||||
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
// else:
|
||||
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
expectedWithdrawals, _, err := st.ExpectedWithdrawals()
|
||||
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
@@ -162,6 +165,11 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
|
||||
if len(wds) != len(expectedWithdrawals) {
|
||||
return nil, fmt.Errorf("execution payload header has %d withdrawals when %d were expected", len(wds), len(expectedWithdrawals))
|
||||
}
|
||||
|
||||
wdRoot, err = ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
@@ -182,6 +190,13 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
|
||||
return nil, errors.Wrap(err, "could not decrease balance")
|
||||
}
|
||||
}
|
||||
|
||||
if st.Version() >= version.Electra {
|
||||
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
|
||||
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(expectedWithdrawals) > 0 {
|
||||
if err := st.SetNextWithdrawalIndex(expectedWithdrawals[len(expectedWithdrawals)-1].Index + 1); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal index")
|
||||
|
||||
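Electra's withdrawal processing above also trims the already-processed entries off the pending partial withdrawal queue (state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:] in the spec). A small sketch of that dequeue step on a plain slice, with an illustrative struct:

package main

import (
	"errors"
	"fmt"
)

type pendingPartialWithdrawal struct {
	ValidatorIndex uint64
	Amount         uint64
}

// dequeue drops the first n processed entries from the queue.
func dequeue(queue []pendingPartialWithdrawal, n uint64) ([]pendingPartialWithdrawal, error) {
	if n > uint64(len(queue)) {
		return nil, errors.New("cannot dequeue more withdrawals than are pending")
	}
	return queue[n:], nil
}

func main() {
	q := []pendingPartialWithdrawal{{1, 100}, {2, 200}, {3, 300}}
	q, err := dequeue(q, 2)
	fmt.Println(q, err) // [{3 300}] <nil>
}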
@@ -12,6 +12,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -675,6 +677,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal // Electra
|
||||
}
|
||||
type control struct {
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
@@ -772,7 +775,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "Less than max sweep at end",
|
||||
Name: "less than max sweep at end",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{80, 81, 82, 83},
|
||||
@@ -789,7 +792,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "Less than max sweep and beginning",
|
||||
Name: "less than max sweep and beginning",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{4, 5, 6},
|
||||
@@ -846,6 +849,36 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many withdrawals with pending partial withdrawals in state",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
},
|
||||
PendingPartialWithdrawals: []*ethpb.PendingPartialWithdrawal{
|
||||
{
|
||||
Index: 11,
|
||||
Amount: withdrawalAmount(11) - maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 40,
|
||||
NextWithdrawalIndex: 29,
|
||||
Balances: map[uint64]uint64{
|
||||
7: 0, 19: 0, 28: 0,
|
||||
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
|
||||
15: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Args: args{
|
||||
Name: "success more than max fully withdrawals",
|
||||
@@ -1011,65 +1044,97 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
prepareValidators := func(st *ethpb.BeaconStateCapella, arguments args) (state.BeaconState, error) {
|
||||
prepareValidators := func(st state.BeaconState, arguments args) error {
|
||||
validators := make([]*ethpb.Validator, numValidators)
|
||||
st.Balances = make([]uint64, numValidators)
|
||||
if err := st.SetBalances(make([]uint64, numValidators)); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range validators {
|
||||
v := &ethpb.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = epochInFuture
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
v.WithdrawalCredentials[31] = byte(i)
|
||||
st.Balances[i] = v.EffectiveBalance - uint64(rand.Intn(1000))
|
||||
if err := st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), v.EffectiveBalance-uint64(rand.Intn(1000))); err != nil {
|
||||
return err
|
||||
}
|
||||
validators[i] = v
|
||||
}
|
||||
for _, idx := range arguments.FullWithdrawalIndices {
|
||||
if idx != notWithdrawableIndex {
|
||||
validators[idx].WithdrawableEpoch = epochInPast
|
||||
}
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
|
||||
return err
|
||||
}
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
}
|
||||
for _, idx := range arguments.PendingPartialWithdrawalIndices {
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
st.Balances[idx] = withdrawalAmount(idx)
|
||||
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st.Validators = validators
|
||||
return state_native.InitializeFromProtoCapella(st)
|
||||
return st.SetValidators(validators)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Args.Name, func(t *testing.T) {
|
||||
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
|
||||
if test.Args.Withdrawals == nil {
|
||||
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
|
||||
for _, fork := range []int{version.Capella, version.Electra} {
|
||||
t.Run(version.String(fork), func(t *testing.T) {
|
||||
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
|
||||
if test.Args.Withdrawals == nil {
|
||||
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
|
||||
}
|
||||
if test.Args.FullWithdrawalIndices == nil {
|
||||
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
if test.Args.PendingPartialWithdrawalIndices == nil {
|
||||
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
slot, err := slots.EpochStart(currentEpoch)
|
||||
require.NoError(t, err)
|
||||
var st state.BeaconState
|
||||
var p interfaces.ExecutionData
|
||||
switch fork {
|
||||
case version.Capella:
|
||||
spb := &ethpb.BeaconStateCapella{
|
||||
Slot: slot,
|
||||
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
|
||||
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
|
||||
}
|
||||
st, err = state_native.InitializeFromProtoUnsafeCapella(spb)
|
||||
require.NoError(t, err)
|
||||
p, err = consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
case version.Electra:
|
||||
spb := &ethpb.BeaconStateElectra{
|
||||
Slot: slot,
|
||||
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
|
||||
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
|
||||
PendingPartialWithdrawals: test.Args.PendingPartialWithdrawals,
|
||||
}
|
||||
st, err = state_native.InitializeFromProtoUnsafeElectra(spb)
|
||||
require.NoError(t, err)
|
||||
p, err = consensusblocks.WrappedExecutionPayloadElectra(&enginev1.ExecutionPayloadElectra{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
t.Fatalf("Add a beacon state setup for version %s", version.String(fork))
|
||||
}
|
||||
err = prepareValidators(st, test.Args)
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
checkPostState(t, test.Control, post)
|
||||
}
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
|
||||
})
|
||||
}
|
||||
if test.Args.FullWithdrawalIndices == nil {
|
||||
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
if test.Args.PendingPartialWithdrawalIndices == nil {
|
||||
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
slot, err := slots.EpochStart(currentEpoch)
|
||||
require.NoError(t, err)
|
||||
spb := &ethpb.BeaconStateCapella{
|
||||
Slot: slot,
|
||||
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
|
||||
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
|
||||
}
|
||||
st, err := prepareValidators(spb, test.Args)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
checkPostState(t, test.Control, post)
|
||||
}
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
"effective_balance_updates.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
@@ -18,15 +19,19 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
@@ -44,8 +49,11 @@ go_test(
|
||||
srcs = [
|
||||
"churn_test.go",
|
||||
"consolidations_test.go",
|
||||
"deposit_fuzz_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"registry_updates_test.go",
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
@@ -53,11 +61,15 @@ go_test(
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -65,7 +77,9 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
|
||||
@@ -2,6 +2,7 @@ package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
@@ -18,10 +19,17 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
|
||||
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
|
||||
vals := make([]*eth.Validator, num)
|
||||
for i := range vals {
|
||||
wd := make([]byte, 32)
|
||||
wd[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wd[31] = byte(i)
|
||||
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: []byte(fmt.Sprintf("val_%d", i)),
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: wd,
|
||||
}
|
||||
}
|
||||
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -42,7 +49,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
nextEpoch := slots.ToEpoch(st.Slot()) + 1
|
||||
|
||||
var nextPendingConsolidation uint64
|
||||
pendingConsolidations, err := st.PendingConsolidations()
|
||||
@@ -59,7 +66,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
nextPendingConsolidation++
|
||||
continue
|
||||
}
|
||||
if sourceValidator.WithdrawableEpoch > currentEpoch {
|
||||
if sourceValidator.WithdrawableEpoch > nextEpoch {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -86,3 +93,162 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
|
||||
return nil
|
||||
}
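The hunk above tightens the sweep condition in ProcessPendingConsolidations from the current epoch to the next epoch. As a rough standalone sketch of that rule (plain uint64 epochs and a hypothetical pendingConsolidation struct stand in for Prysm's types; only the comparison against nextEpoch is taken from the diff, the surrounding queue handling is simplified):

package main

import "fmt"

// pendingConsolidation is a simplified stand-in for the beacon state entry.
type pendingConsolidation struct {
	sourceWithdrawableEpoch uint64
	sourceSlashed           bool
}

// processable mirrors the updated check: skip slashed sources, and stop the
// sweep as soon as a source validator is not yet withdrawable by the next epoch.
func processable(pc pendingConsolidation, nextEpoch uint64) (process bool, stop bool) {
	if pc.sourceSlashed {
		return false, false // skipped, keep scanning
	}
	if pc.sourceWithdrawableEpoch > nextEpoch {
		return false, true // not ready yet, stop the sweep here
	}
	return true, false
}

func main() {
	next := uint64(11)
	queue := []pendingConsolidation{
		{sourceWithdrawableEpoch: 9},
		{sourceWithdrawableEpoch: 11},
		{sourceWithdrawableEpoch: 12}, // sweep stops at this entry
	}
	for i, pc := range queue {
		ok, stop := processable(pc, next)
		fmt.Printf("entry %d: process=%v stop=%v\n", i, ok, stop)
		if stop {
			break
		}
	}
}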
|
||||
|
||||
// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
|
||||
// calls to the beacon state.
|
||||
//
|
||||
// def process_consolidation_request(
|
||||
// state: BeaconState,
|
||||
// consolidation_request: ConsolidationRequest
|
||||
// ) -> None:
|
||||
// # If the pending consolidations queue is full, consolidation requests are ignored
|
||||
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
|
||||
// return
|
||||
// # If there is too little available consolidation churn limit, consolidation requests are ignored
|
||||
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
|
||||
// return
|
||||
//
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// # Verify pubkeys exists
|
||||
// request_source_pubkey = consolidation_request.source_pubkey
|
||||
// request_target_pubkey = consolidation_request.target_pubkey
|
||||
// if request_source_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// if request_target_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
|
||||
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
|
||||
// source_validator = state.validators[source_index]
|
||||
// target_validator = state.validators[target_index]
|
||||
//
|
||||
// # Verify that source != target, so a consolidation cannot be used as an exit.
|
||||
// if source_index == target_index:
|
||||
// return
|
||||
//
|
||||
// # Verify source withdrawal credentials
|
||||
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
|
||||
// is_correct_source_address = (
|
||||
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
// )
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
//
|
||||
// # Verify that target has execution withdrawal credentials
|
||||
// if not has_execution_withdrawal_credential(target_validator):
|
||||
// return
|
||||
//
|
||||
// # Verify the source and the target are active
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// if not is_active_validator(source_validator, current_epoch):
|
||||
// return
|
||||
// if not is_active_validator(target_validator, current_epoch):
|
||||
// return
|
||||
// # Verify exits for source and target have not been initiated
|
||||
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
//
|
||||
// # Initiate source validator exit and append pending consolidation
|
||||
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||
// state, source_validator.effective_balance
|
||||
// )
|
||||
// source_validator.withdrawable_epoch = Epoch(
|
||||
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
// )
|
||||
// state.pending_consolidations.append(PendingConsolidation(
|
||||
// source_index=source_index,
|
||||
// target_index=target_index
|
||||
// ))
|
||||
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
|
||||
if len(reqs) == 0 || st == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
activeBal, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
|
||||
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
return nil
|
||||
}
|
||||
curEpoch := slots.ToEpoch(st.Slot())
|
||||
ffe := params.BeaconConfig().FarFutureEpoch
|
||||
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
|
||||
|
||||
for _, cr := range reqs {
|
||||
if ctx.Err() != nil {
|
||||
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
|
||||
}
|
||||
if npc, err := st.NumPendingConsolidations(); err != nil {
|
||||
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
|
||||
} else if npc >= pcLimit {
|
||||
return nil
|
||||
}
|
||||
|
||||
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
tgtIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.TargetPubkey))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if srcIdx == tgtIdx {
|
||||
continue
|
||||
}
|
||||
|
||||
srcV, err := st.ValidatorAtIndex(srcIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
// Verify source withdrawal credentials
|
||||
if !helpers.HasExecutionWithdrawalCredentials(srcV) {
|
||||
continue
|
||||
}
|
||||
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Target validator must have their withdrawal credentials set appropriately.
|
||||
if !helpers.HasExecutionWithdrawalCredentials(tgtV) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Both validators must be active.
|
||||
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
|
||||
continue
|
||||
}
|
||||
// Neither validator is exiting.
|
||||
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
|
||||
continue
|
||||
}
|
||||
|
||||
// Initiate the exit of the source validator.
|
||||
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute consolidaiton epoch: %w", err)
|
||||
}
|
||||
srcV.ExitEpoch = exitEpoch
|
||||
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
|
||||
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
|
||||
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
if err := st.AppendPendingConsolidation(ð.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
|
||||
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
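Two of the filters above are simple byte-level checks on the 32-byte withdrawal credentials. The sketch below restates them outside of Prysm's state types; the prefix constants assume the spec's 0x01 (ETH1 address) and 0x02 (compounding) values, and the helper names are hypothetical.

package main

import (
	"bytes"
	"fmt"
)

const (
	eth1AddressWithdrawalPrefix byte = 0x01 // spec: ETH1_ADDRESS_WITHDRAWAL_PREFIX
	compoundingWithdrawalPrefix byte = 0x02 // spec: COMPOUNDING_WITHDRAWAL_PREFIX
)

// hasExecutionWithdrawalCredential reports whether a 32-byte credential uses
// either execution-layer prefix, mirroring helpers.HasExecutionWithdrawalCredentials.
func hasExecutionWithdrawalCredential(cred []byte) bool {
	if len(cred) != 32 {
		return false
	}
	return cred[0] == eth1AddressWithdrawalPrefix || cred[0] == compoundingWithdrawalPrefix
}

// sourceAddressMatches reports whether the last 20 bytes of the credential equal
// the consolidation request's source address, as checked with bytes.HasSuffix above.
func sourceAddressMatches(cred, sourceAddr []byte) bool {
	return len(cred) == 32 && len(sourceAddr) == 20 && bytes.HasSuffix(cred, sourceAddr)
}

func main() {
	cred := make([]byte, 32)
	cred[0] = eth1AddressWithdrawalPrefix
	addr := make([]byte, 20)
	addr[19] = 0xaa
	copy(cred[12:], addr)

	fmt.Println(hasExecutionWithdrawalCredential(cred)) // true
	fmt.Println(sourceAddressMatches(cred, addr))       // true
}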
|
||||
|
||||
@@ -8,6 +8,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -229,3 +232,199 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func TestProcessConsolidationRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
reqs []*enginev1.ConsolidationRequest
|
||||
validate func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "one valid request",
|
||||
state: func() state.BeaconState {
|
||||
st := ð.BeaconStateElectra{
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
}
|
||||
// Validator scenario setup. See comments in reqs section.
|
||||
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
|
||||
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(0)
|
||||
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
st.Validators[13].ExitEpoch = 10
|
||||
st.Validators[16].ExitEpoch = 10
|
||||
s, err := state_native.InitializeFromProtoElectra(st)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
reqs: []*enginev1.ConsolidationRequest{
|
||||
// Source doesn't have withdrawal credentials.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
|
||||
SourcePubkey: []byte("val_3"),
|
||||
TargetPubkey: []byte("val_4"),
|
||||
},
|
||||
// Source withdrawal credentials don't match the consolidation address.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
|
||||
SourcePubkey: []byte("val_5"),
|
||||
TargetPubkey: []byte("val_6"),
|
||||
},
|
||||
// Target does not have their withdrawal credentials set appropriately.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
|
||||
SourcePubkey: []byte("val_7"),
|
||||
TargetPubkey: []byte("val_8"),
|
||||
},
|
||||
// Source is inactive.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
|
||||
SourcePubkey: []byte("val_9"),
|
||||
TargetPubkey: []byte("val_10"),
|
||||
},
|
||||
// Target is inactive.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
|
||||
SourcePubkey: []byte("val_11"),
|
||||
TargetPubkey: []byte("val_12"),
|
||||
},
|
||||
// Source is exiting.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
|
||||
SourcePubkey: []byte("val_13"),
|
||||
TargetPubkey: []byte("val_14"),
|
||||
},
|
||||
// Target is exiting.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
|
||||
SourcePubkey: []byte("val_15"),
|
||||
TargetPubkey: []byte("val_16"),
|
||||
},
|
||||
// Source doesn't exist
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
|
||||
SourcePubkey: []byte("INVALID"),
|
||||
TargetPubkey: []byte("val_0"),
|
||||
},
|
||||
// Target doesn't exist
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
|
||||
SourcePubkey: []byte("val_0"),
|
||||
TargetPubkey: []byte("INVALID"),
|
||||
},
|
||||
// Source == target
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
|
||||
SourcePubkey: []byte("val_0"),
|
||||
TargetPubkey: []byte("val_0"),
|
||||
},
|
||||
// Valid consolidation request. This should be last to ensure invalid requests do
|
||||
// not end the processing early.
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
|
||||
SourcePubkey: []byte("val_1"),
|
||||
TargetPubkey: []byte("val_2"),
|
||||
},
|
||||
},
|
||||
validate: func(t *testing.T, st state.BeaconState) {
|
||||
// Verify a pending consolidation is created.
|
||||
numPC, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), numPC)
|
||||
pcs, err := st.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
|
||||
require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)
|
||||
|
||||
// Verify the source validator is exiting.
|
||||
src, err := st.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
|
||||
require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pending consolidations limit reached",
|
||||
state: func() state.BeaconState {
|
||||
st := ð.BeaconStateElectra{
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoElectra(st)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
reqs: []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
|
||||
SourcePubkey: []byte("val_1"),
|
||||
TargetPubkey: []byte("val_2"),
|
||||
},
|
||||
},
|
||||
validate: func(t *testing.T, st state.BeaconState) {
|
||||
// Verify no pending consolidation is created.
|
||||
numPC, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
|
||||
|
||||
// Verify the source validator is not exiting.
|
||||
src, err := st.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pending consolidations limit reached during processing",
|
||||
state: func() state.BeaconState {
|
||||
st := ð.BeaconStateElectra{
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoElectra(st)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
reqs: []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
|
||||
SourcePubkey: []byte("val_1"),
|
||||
TargetPubkey: []byte("val_2"),
|
||||
},
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
|
||||
SourcePubkey: []byte("val_3"),
|
||||
TargetPubkey: []byte("val_4"),
|
||||
},
|
||||
},
|
||||
validate: func(t *testing.T, st state.BeaconState) {
|
||||
// Verify a pending consolidation is created.
|
||||
numPC, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
|
||||
|
||||
// The first consolidation was appended.
|
||||
pcs, err := st.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
|
||||
require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)
|
||||
|
||||
// Verify the second source validator is not exiting.
|
||||
src, err := st.ValidatorAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
|
||||
require.NoError(t, err)
|
||||
if tt.validate != nil {
|
||||
tt.validate(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
48
beacon-chain/core/electra/deposit_fuzz_test.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(state)
|
||||
require.NoError(t, err)
|
||||
r, err := electra.ProcessDeposits(ctx, s, deposits)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(state)
|
||||
require.NoError(t, err)
|
||||
r, err := electra.ProcessDeposit(s, deposit, true)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,15 +2,189 @@ package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/contracts/deposit"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessDeposits is one of the operations performed on each processed
|
||||
// beacon block to verify queued validators from the Ethereum 1.0 Deposit Contract
|
||||
// into the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// For each deposit in block.body.deposits:
|
||||
// process_deposit(state, deposit)
|
||||
func ProcessDeposits(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
deposits []*ethpb.Deposit,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessDeposits")
|
||||
defer span.End()
|
||||
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
|
||||
// individual deposits with signature verification enabled.
|
||||
batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
|
||||
}
|
||||
|
||||
for _, d := range deposits {
|
||||
if d == nil || d.Data == nil {
|
||||
return nil, errors.New("got a nil deposit in block")
|
||||
}
|
||||
beaconState, err = ProcessDeposit(beaconState, d, batchVerified)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(d.Data.PublicKey))
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessDeposit takes in a deposit object and inserts it
|
||||
// into the registry as a new validator or balance change.
|
||||
// Returns the resulting state and any error encountered while applying the deposit.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
//
|
||||
// # Verify the Merkle branch
|
||||
// assert is_valid_merkle_branch(
|
||||
// leaf=hash_tree_root(deposit.data),
|
||||
// branch=deposit.proof,
|
||||
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
|
||||
// index=state.eth1_deposit_index,
|
||||
// root=state.eth1_data.deposit_root,
|
||||
// )
|
||||
//
|
||||
// # Deposits must be processed in order
|
||||
// state.eth1_deposit_index += 1
|
||||
//
|
||||
// apply_deposit(
|
||||
// state=state,
|
||||
// pubkey=deposit.data.pubkey,
|
||||
// withdrawal_credentials=deposit.data.withdrawal_credentials,
|
||||
// amount=deposit.data.amount,
|
||||
// signature=deposit.data.signature,
|
||||
// )
|
||||
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
|
||||
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.Wrapf(err, "could not verify deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
|
||||
}
|
||||
if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ApplyDeposit(beaconState, deposit.Data, verifySignature)
|
||||
}
|
||||
|
||||
// ApplyDeposit
|
||||
// def apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> None:
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// if pubkey not in validator_pubkeys:
|
||||
//
|
||||
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
|
||||
// add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
|
||||
//
|
||||
// else:
|
||||
//
|
||||
// # Increase balance by deposit amount
|
||||
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
|
||||
// state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [Modified in Electra:EIP-7251]
|
||||
// # Check if valid deposit switch to compounding credentials
|
||||
//
|
||||
// if ( is_compounding_withdrawal_credential(withdrawal_credentials) and has_eth1_withdrawal_credential(state.validators[index])
|
||||
//
|
||||
// and is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature)
|
||||
// ):
|
||||
// switch_to_compounding_validator(state, index)
|
||||
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
|
||||
pubKey := data.PublicKey
|
||||
amount := data.Amount
|
||||
withdrawalCredentials := data.WithdrawalCredentials
|
||||
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
if !ok {
|
||||
if verifySignature {
|
||||
valid, err := IsValidDepositSignature(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify deposit signature")
|
||||
}
|
||||
if !valid {
|
||||
return beaconState, nil
|
||||
}
|
||||
}
|
||||
if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, amount); err != nil {
|
||||
return nil, errors.Wrap(err, "could not add validator to registry")
|
||||
}
|
||||
} else {
|
||||
// Top-ups are not validated before the state change (a phase0 behavior); the signature is only checked below when switching to compounding credentials.
|
||||
if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
val, err := beaconState.ValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if helpers.IsCompoundingWithdrawalCredential(withdrawalCredentials) && helpers.HasETH1WithdrawalCredential(val) {
|
||||
if verifySignature {
|
||||
valid, err := IsValidDepositSignature(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify deposit signature")
|
||||
}
|
||||
if !valid {
|
||||
return beaconState, nil
|
||||
}
|
||||
}
|
||||
if err := SwitchToCompoundingValidator(beaconState, index); err != nil {
|
||||
return nil, errors.Wrap(err, "could not switch to compound validator")
|
||||
}
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// IsValidDepositSignature returns whether deposit_data is valid
|
||||
// def is_valid_deposit_signature(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> bool:
|
||||
//
|
||||
// deposit_message = DepositMessage( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, )
|
||||
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
|
||||
// signing_root = compute_signing_root(deposit_message, domain)
|
||||
// return bls.Verify(pubkey, signing_root, signature)
|
||||
func IsValidDepositSignature(data *ethpb.Deposit_Data) (bool, error) {
|
||||
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := verifyDepositDataSigningRoot(data, domain); err != nil {
|
||||
// Ignore this error as in the spec pseudo code.
|
||||
log.WithError(err).Debug("Skipping deposit: could not verify deposit data signature")
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
|
||||
return deposit.VerifyDepositSignature(obj, domain)
|
||||
}
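IsValidDepositSignature verifies the deposit against a fork-agnostic domain. The hypothetical test below sketches, using the same Prysm packages exercised by the tests later in this diff, how a correctly signed Deposit_Data is accepted; it is illustrative only and not part of the change.

package electra_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

// Sketch: a self-signed deposit that IsValidDepositSignature accepts.
func TestIsValidDepositSignature_Sketch(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)
	wc := make([]byte, 32)
	wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte

	msg := &eth.DepositMessage{
		PublicKey:             sk.PublicKey().Marshal(),
		WithdrawalCredentials: wc,
		Amount:                params.BeaconConfig().MinActivationBalance,
	}
	// Deposits use a fork-agnostic domain: no fork version, no genesis validators root.
	domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
	require.NoError(t, err)
	root, err := signing.ComputeSigningRoot(msg, domain)
	require.NoError(t, err)

	data := &eth.Deposit_Data{
		PublicKey:             msg.PublicKey,
		WithdrawalCredentials: msg.WithdrawalCredentials,
		Amount:                msg.Amount,
		Signature:             sk.Sign(root[:]).Marshal(),
	}
	ok, err := electra.IsValidDepositSignature(data)
	require.NoError(t, err)
	require.Equal(t, true, ok)
}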
|
||||
|
||||
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
|
||||
//
|
||||
// Spec definition:
|
||||
@@ -19,12 +193,27 @@ import (
|
||||
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
// processed_amount = 0
|
||||
// next_deposit_index = 0
|
||||
// deposits_to_postpone = []
|
||||
//
|
||||
// for deposit in state.pending_balance_deposits:
|
||||
// if processed_amount + deposit.amount > available_for_processing:
|
||||
// break
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// processed_amount += deposit.amount
|
||||
// validator = state.validators[deposit.index]
|
||||
// # Validator is exiting, postpone the deposit until after withdrawable epoch
|
||||
// if validator.exit_epoch < FAR_FUTURE_EPOCH:
|
||||
// if get_current_epoch(state) <= validator.withdrawable_epoch:
|
||||
// deposits_to_postpone.append(deposit)
|
||||
// # Deposited balance will never become active. Increase balance but do not consume churn
|
||||
// else:
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// # Validator is not exiting, attempt to process deposit
|
||||
// else:
|
||||
// # Deposit does not fit in the churn, no more deposit processing in this epoch.
|
||||
// if processed_amount + deposit.amount > available_for_processing:
|
||||
// break
|
||||
// # Deposit fits in the churn, process it. Increase balance and consume churn.
|
||||
// else:
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// processed_amount += deposit.amount
|
||||
// # Regardless of how the deposit was handled, we move on in the queue.
|
||||
// next_deposit_index += 1
|
||||
//
|
||||
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||
@@ -33,6 +222,8 @@ import (
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
//
|
||||
// state.pending_balance_deposits += deposits_to_postpone
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
|
||||
defer span.End()
|
||||
@@ -45,43 +236,136 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
|
||||
processedAmount := uint64(0)
|
||||
nextDepositIndex := 0
|
||||
var depositsToPostpone []*eth.PendingBalanceDeposit
|
||||
|
||||
deposits, err := st.PendingBalanceDeposits()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, deposit := range deposits {
|
||||
if primitives.Gwei(deposit.Amount) > availableForProcessing {
|
||||
break
|
||||
// constants
|
||||
ffe := params.BeaconConfig().FarFutureEpoch
|
||||
nextEpoch := slots.ToEpoch(st.Slot()) + 1
|
||||
|
||||
for _, balanceDeposit := range deposits {
|
||||
v, err := st.ValidatorAtIndexReadOnly(balanceDeposit.Index)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch validator at index: %w", err)
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
|
||||
return err
|
||||
|
||||
// If the validator is currently exiting, postpone the deposit until after the withdrawable
|
||||
// epoch.
|
||||
if v.ExitEpoch() < ffe {
|
||||
if nextEpoch <= v.WithdrawableEpoch() {
|
||||
depositsToPostpone = append(depositsToPostpone, balanceDeposit)
|
||||
} else {
|
||||
// The deposited balance will never become active. Therefore, we increase the balance but do
|
||||
// not consume the churn.
|
||||
if err := helpers.IncreaseBalance(st, balanceDeposit.Index, balanceDeposit.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validator is not exiting, attempt to process deposit.
|
||||
if primitives.Gwei(processedAmount+balanceDeposit.Amount) > availableForProcessing {
|
||||
break
|
||||
}
|
||||
// Deposit fits in churn, process it. Increase balance and consume churn.
|
||||
if err := helpers.IncreaseBalance(st, balanceDeposit.Index, balanceDeposit.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
processedAmount += balanceDeposit.Amount
|
||||
}
|
||||
availableForProcessing -= primitives.Gwei(deposit.Amount)
|
||||
|
||||
// Regardless of how the deposit was handled, we move on in the queue.
|
||||
nextDepositIndex++
|
||||
}
|
||||
|
||||
deposits = deposits[nextDepositIndex:]
|
||||
// Combined operation:
|
||||
// - state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||
// - state.pending_balance_deposits += deposits_to_postpone
|
||||
// However, the number of remaining deposits must be maintained to properly update the deposit
|
||||
// balance to consume.
|
||||
numRemainingDeposits := len(deposits[nextDepositIndex:])
|
||||
deposits = append(deposits[nextDepositIndex:], depositsToPostpone...)
|
||||
if err := st.SetPendingBalanceDeposits(deposits); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(deposits) == 0 {
|
||||
if numRemainingDeposits == 0 {
|
||||
return st.SetDepositBalanceToConsume(0)
|
||||
} else {
|
||||
return st.SetDepositBalanceToConsume(availableForProcessing)
|
||||
return st.SetDepositBalanceToConsume(availableForProcessing - primitives.Gwei(processedAmount))
|
||||
}
|
||||
}
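The queue discipline implemented above can be hard to follow inside the diff. The following dependency-free simulation restates it with hypothetical types: deposits of exiting validators are postponed (or credited without consuming churn once past the withdrawable epoch), and all other deposits consume churn until the available amount runs out.

package main

import "fmt"

type pendingDeposit struct {
	index   int
	amount  uint64 // gwei
	exiting bool   // validator has initiated exit
	expired bool   // validator is past its withdrawable epoch; the balance can no longer activate
}

// processQueue applies the rules from the spec comment above with plain integers.
func processQueue(queue []pendingDeposit, available uint64) (credited map[int]uint64, postponed []pendingDeposit, leftover uint64) {
	credited = make(map[int]uint64)
	processed := uint64(0)
	next := 0
	for _, d := range queue {
		if d.exiting {
			if !d.expired {
				postponed = append(postponed, d) // retry after the withdrawable epoch
			} else {
				credited[d.index] += d.amount // increase balance, no churn consumed
			}
		} else {
			if processed+d.amount > available {
				break // deposit does not fit in the churn; stop for this epoch
			}
			credited[d.index] += d.amount
			processed += d.amount
		}
		next++
	}
	if next == len(queue) {
		return credited, postponed, 0
	}
	return credited, postponed, available - processed
}

func main() {
	queue := []pendingDeposit{
		{index: 0, amount: 32e9},
		{index: 1, amount: 32e9, exiting: true},
		{index: 2, amount: 64e9},
	}
	credited, postponed, leftover := processQueue(queue, 96e9)
	fmt.Println(credited, len(postponed), leftover)
}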
|
||||
|
||||
// ProcessDepositRequests is a function as part of electra to process execution layer deposits
|
||||
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
|
||||
defer span.End()
|
||||
// TODO: replace with 6110 logic
|
||||
// return b.ProcessDepositRequests(beaconState, requests)
|
||||
|
||||
if len(requests) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
deposits := make([]*ethpb.Deposit, 0)
|
||||
for _, req := range requests {
|
||||
if req == nil {
|
||||
return nil, errors.New("got a nil DepositRequest")
|
||||
}
|
||||
deposits = append(deposits, ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: req.Pubkey,
|
||||
WithdrawalCredentials: req.WithdrawalCredentials,
|
||||
Amount: req.Amount,
|
||||
Signature: req.Signature,
|
||||
},
|
||||
})
|
||||
}
|
||||
batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
|
||||
}
|
||||
for _, receipt := range requests {
|
||||
beaconState, err = processDepositRequest(beaconState, receipt, batchVerified)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not apply deposit request")
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// processDepositRequest processes the specific deposit receipt
|
||||
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
|
||||
//
|
||||
// # Set deposit request start index
|
||||
// if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUEST_START_INDEX:
|
||||
// state.deposit_requests_start_index = deposit_request.index
|
||||
//
|
||||
// apply_deposit(
|
||||
// state=state,
|
||||
// pubkey=deposit_request.pubkey,
|
||||
// withdrawal_credentials=deposit_request.withdrawal_credentials,
|
||||
// amount=deposit_request.amount,
|
||||
// signature=deposit_request.signature,
|
||||
// )
|
||||
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest, verifySignature bool) (state.BeaconState, error) {
|
||||
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get deposit requests start index")
|
||||
}
|
||||
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
|
||||
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set deposit requests start index")
|
||||
}
|
||||
}
|
||||
return ApplyDeposit(beaconState, ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
|
||||
Amount: request.Amount,
|
||||
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
|
||||
Signature: bytesutil.SafeCopyBytes(request.Signature),
|
||||
}, verifySignature)
|
||||
}
|
||||
|
||||
@@ -6,11 +6,18 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
@@ -100,6 +107,80 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
}
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exiting validator deposit postponed",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 5)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 5,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
v, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.ExitEpoch = 10
|
||||
v.WithdrawableEpoch = 20
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(0, v))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 1..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(1); i < 4; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
}
|
||||
|
||||
// All of the balance deposits should have been processed, except validator index 0 was
|
||||
// added back to the pending deposits queue.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exited validator balance increased",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 1)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: 1_000_000,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
v, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.ExitEpoch = 2
|
||||
v.WithdrawableEpoch = 8
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(0, v))
|
||||
require.NoError(t, st.UpdateBalancesAtIndex(0, 100_000))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
b, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1_100_000), b)
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
@@ -126,3 +207,194 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessDepositRequests(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("empty requests continues", func(t *testing.T) {
|
||||
newSt, err := electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, newSt, st)
|
||||
})
|
||||
t.Run("nil request errors", func(t *testing.T) {
|
||||
_, err = electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{nil})
|
||||
require.ErrorContains(t, "got a nil DepositRequest", err)
|
||||
})
|
||||
|
||||
vals := st.Validators()
|
||||
vals[0].PublicKey = sk.PublicKey().Marshal()
|
||||
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(make([]*eth.PendingBalanceDeposit, 0))) // reset pending balance deposits as the deterministic state populates this already
|
||||
withdrawalCred := make([]byte, 32)
|
||||
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
depositMessage := ð.DepositMessage{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: 1000,
|
||||
WithdrawalCredentials: withdrawalCred,
|
||||
}
|
||||
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
requests := []*enginev1.DepositRequest{
|
||||
{
|
||||
Pubkey: depositMessage.PublicKey,
|
||||
Index: 0,
|
||||
WithdrawalCredentials: depositMessage.WithdrawalCredentials,
|
||||
Amount: depositMessage.Amount,
|
||||
Signature: sig.Marshal(),
|
||||
},
|
||||
}
|
||||
st, err = electra.ProcessDepositRequests(context.Background(), st, requests)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pbd))
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
require.Equal(t, uint64(2000), pbd[1].Amount)
|
||||
}
|
||||
|
||||
func TestProcessDeposit_Electra_Simple(t *testing.T) {
|
||||
deps, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := util.DeterministicEth1Data(len(deps))
|
||||
require.NoError(t, err)
|
||||
registry := []*eth.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
st, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ð.Fork{
|
||||
PreviousVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
pdSt, err := electra.ProcessDeposits(context.Background(), st, deps)
|
||||
require.NoError(t, err)
|
||||
pbd, err := pdSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, pbd[2].Amount)
|
||||
require.Equal(t, 3, len(pbd))
|
||||
}
|
||||
|
||||
func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
// Same test settings as in TestProcessDeposit_AddsNewValidatorDeposit, except that we use an invalid signature
|
||||
dep, _, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
dt, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ð.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
}
|
||||
registry := []*eth.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ð.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := electra.ProcessDeposit(beaconState, dep[0], true)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
t.Errorf(
|
||||
"Expected Eth1DepositIndex to be increased by 1 after processing an invalid deposit, received change: %v",
|
||||
newState.Eth1DepositIndex(),
|
||||
)
|
||||
}
|
||||
if len(newState.Validators()) != 1 {
|
||||
t.Errorf("Expected validator list to have length 1, received: %v", len(newState.Validators()))
|
||||
}
|
||||
if len(newState.Balances()) != 1 {
|
||||
t.Errorf("Expected validator balances list to have length 1, received: %v", len(newState.Balances()))
|
||||
}
|
||||
if newState.Balances()[0] != 0 {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyDeposit_TopUps_WithBadSignature(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 3)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
withdrawalCred := make([]byte, 32)
|
||||
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
topUpAmount := uint64(1234)
|
||||
depositData := ð.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: topUpAmount,
|
||||
WithdrawalCredentials: withdrawalCred,
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
vals := st.Validators()
|
||||
vals[0].PublicKey = sk.PublicKey().Marshal()
|
||||
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
adSt, err := electra.ApplyDeposit(st, depositData, true)
|
||||
require.NoError(t, err)
|
||||
pbd, err := adSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
require.Equal(t, topUpAmount, pbd[0].Amount)
|
||||
}
|
||||
|
||||
func TestApplyDeposit_Electra_SwitchToCompoundingValidator(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 3)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
withdrawalCred := make([]byte, 32)
|
||||
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
depositData := ð.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: 1000,
|
||||
WithdrawalCredentials: withdrawalCred,
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
vals := st.Validators()
|
||||
vals[0].PublicKey = sk.PublicKey().Marshal()
|
||||
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
sr, err := signing.ComputeSigningRoot(depositData, bytesutil.ToBytes(3, 32))
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
depositData.Signature = sig.Marshal()
|
||||
adSt, err := electra.ApplyDeposit(st, depositData, false)
|
||||
require.NoError(t, err)
|
||||
pbd, err := adSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pbd))
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
require.Equal(t, uint64(2000), pbd[1].Amount)
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
|
||||
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
|
||||
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
|
||||
val.EffectiveBalance = effectiveBal
|
||||
return false, val, nil
|
||||
return true, val, nil
|
||||
}
|
||||
return false, val, nil
|
||||
}
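The comparison in this hunk is the spec's hysteresis rule. A small worked sketch, assuming the mainnet constants (EFFECTIVE_BALANCE_INCREMENT of 1 ETH in gwei, HYSTERESIS_QUOTIENT of 4, downward and upward multipliers of 1 and 5); the helper name is hypothetical:

package main

import "fmt"

const (
	gweiPerETH         = uint64(1_000_000_000)
	effBalanceInc      = 1 * gweiPerETH // EFFECTIVE_BALANCE_INCREMENT
	hysteresisQuotient = uint64(4)
	downwardMult       = uint64(1)
	upwardMult         = uint64(5)
)

// newEffectiveBalance applies the hysteresis rule from process_effective_balance_updates.
func newEffectiveBalance(balance, effective, maxEffective uint64) uint64 {
	hysteresisInc := effBalanceInc / hysteresisQuotient
	downward := hysteresisInc * downwardMult // 0.25 ETH
	upward := hysteresisInc * upwardMult     // 1.25 ETH
	if balance+downward < effective || effective+upward < balance {
		return min(balance-balance%effBalanceInc, maxEffective)
	}
	return effective
}

func main() {
	maxEff := 32 * gweiPerETH
	fmt.Println(newEffectiveBalance(31_900_000_000, 32*gweiPerETH, maxEff)) // unchanged: 32 ETH
	fmt.Println(newEffectiveBalance(31_700_000_000, 32*gweiPerETH, maxEff)) // drops to 31 ETH
}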
|
||||
|
||||
@@ -2,12 +2,19 @@ package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// ProcessRegistryUpdates rotates validators in and out of active pool.
|
||||
// the amount to rotate is determined churn limit.
|
||||
// ProcessRegistryUpdates processes all validators eligible for the activation queue, all validators
|
||||
// which should be ejected, and all validators which are eligible for activation from the queue.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
@@ -28,7 +35,72 @@ import (
|
||||
// for validator in state.validators:
|
||||
// if is_eligible_for_activation(state, validator):
|
||||
// validator.activation_epoch = activation_epoch
|
||||
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
// TODO: replace with real implementation
|
||||
return state, nil
|
||||
func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
ejectionBal := params.BeaconConfig().EjectionBalance
|
||||
activationEpoch := helpers.ActivationExitEpoch(currentEpoch)
|
||||
|
||||
// To avoid copying the state validator set via st.Validators(), we will perform a read only pass
|
||||
// over the validator set while collecting validator indices where the validator copy is actually
|
||||
// necessary, then we will process these operations.
|
||||
eligibleForActivationQ := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
|
||||
|
||||
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
// Collect validators eligible to enter the activation queue.
|
||||
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
|
||||
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators to eject.
|
||||
if val.EffectiveBalance() <= ejectionBal && helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
|
||||
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators eligible for activation and not yet dequeued for activation.
|
||||
if helpers.IsEligibleForActivationUsingROVal(st, val) {
|
||||
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to read validators: %w", err)
|
||||
}
|
||||
|
||||
// Handle validators eligible to join the activation queue.
|
||||
for _, idx := range eligibleForActivationQ {
|
||||
v, err := st.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.ActivationEligibilityEpoch = currentEpoch + 1
|
||||
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return fmt.Errorf("failed to updated eligible validator at index %d: %w", idx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle validator ejections.
|
||||
for _, idx := range eligibleForEjection {
|
||||
var err error
|
||||
// exitQueueEpoch and churn arguments are not used in electra.
|
||||
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
|
||||
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
|
||||
return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, idx := range eligibleForActivation {
|
||||
// Activate all eligible validators.
|
||||
v, err := st.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.ActivationEpoch = activationEpoch
|
||||
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return fmt.Errorf("failed to activate validator at index %d: %w", idx, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
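The activation step above relies on helpers.ActivationExitEpoch, which follows compute_activation_exit_epoch from the spec. A minimal sketch assuming the mainnet MAX_SEED_LOOKAHEAD of 4; the registry_updates_test.go added below exercises the same value via helpers.ActivationExitEpoch(5):

package main

import "fmt"

const maxSeedLookahead = uint64(4) // MAX_SEED_LOOKAHEAD (mainnet)

// activationExitEpoch mirrors compute_activation_exit_epoch(epoch) = epoch + 1 + MAX_SEED_LOOKAHEAD.
func activationExitEpoch(currentEpoch uint64) uint64 {
	return currentEpoch + 1 + maxSeedLookahead
}

func main() {
	// A validator dequeued for activation at epoch 5 becomes active at epoch 10.
	fmt.Println(activationExitEpoch(5)) // 10
}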
|
||||
|
||||
171
beacon-chain/core/electra/registry_updates_test.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package electra_test

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestProcessRegistryUpdates(t *testing.T) {
	finalizedEpoch := primitives.Epoch(4)

	tests := []struct {
		name  string
		state state.BeaconState
		check func(*testing.T, state.BeaconState)
	}{
		{
			name: "No rotation",
			state: func() state.BeaconState {
				base := &eth.BeaconStateElectra{
					Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
					Validators: []*eth.Validator{
						{ExitEpoch: params.BeaconConfig().MaxSeedLookahead},
						{ExitEpoch: params.BeaconConfig().MaxSeedLookahead},
					},
					Balances: []uint64{
						params.BeaconConfig().MaxEffectiveBalance,
						params.BeaconConfig().MaxEffectiveBalance,
					},
					FinalizedCheckpoint: &eth.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
				}
				st, err := state_native.InitializeFromProtoElectra(base)
				require.NoError(t, err)
				return st
			}(),
			check: func(t *testing.T, st state.BeaconState) {
				for i, val := range st.Validators() {
					require.Equal(t, params.BeaconConfig().MaxSeedLookahead, val.ExitEpoch, "validator updated unexpectedly at index %d", i)
				}
			},
		},
		{
			name: "Validators are activated",
			state: func() state.BeaconState {
				base := &eth.BeaconStateElectra{
					Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
					FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
				}
				for i := uint64(0); i < 10; i++ {
					base.Validators = append(base.Validators, &eth.Validator{
						ActivationEligibilityEpoch: finalizedEpoch,
						EffectiveBalance:           params.BeaconConfig().MaxEffectiveBalance,
						ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
					})
				}
				st, err := state_native.InitializeFromProtoElectra(base)
				require.NoError(t, err)
				return st
			}(),
			check: func(t *testing.T, st state.BeaconState) {
				activationEpoch := helpers.ActivationExitEpoch(5)
				// All validators should be activated.
				for i, val := range st.Validators() {
					require.Equal(t, activationEpoch, val.ActivationEpoch, "failed to update validator at index %d", i)
				}
			},
		},
		{
			name: "Validators are exited",
			state: func() state.BeaconState {
				base := &eth.BeaconStateElectra{
					Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
					FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
				}
				for i := uint64(0); i < 10; i++ {
					base.Validators = append(base.Validators, &eth.Validator{
						EffectiveBalance:  params.BeaconConfig().EjectionBalance - 1,
						ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
						WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
					})
				}
				st, err := state_native.InitializeFromProtoElectra(base)
				require.NoError(t, err)
				return st
			}(),
			check: func(t *testing.T, st state.BeaconState) {
				// All validators should be exited
				for i, val := range st.Validators() {
					require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, val.ExitEpoch, "failed to update exit epoch on validator %d", i)
					require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, val.WithdrawableEpoch, "failed to update withdrawable epoch on validator %d", i)
				}
			},
		},
		{
			name: "Validators are exiting",
			state: func() state.BeaconState {
				base := &eth.BeaconStateElectra{
					Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
					FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
				}
				for i := uint64(0); i < 10; i++ {
					base.Validators = append(base.Validators, &eth.Validator{
						EffectiveBalance:  params.BeaconConfig().EjectionBalance - 1,
						ExitEpoch:         10,
						WithdrawableEpoch: 20,
					})
				}
				st, err := state_native.InitializeFromProtoElectra(base)
				require.NoError(t, err)
				return st
			}(),
			check: func(t *testing.T, st state.BeaconState) {
				// All validators should be exited
				for i, val := range st.Validators() {
					require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, val.ExitEpoch, "failed to update exit epoch on validator %d", i)
					require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, val.WithdrawableEpoch, "failed to update withdrawable epoch on validator %d", i)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := electra.ProcessRegistryUpdates(context.TODO(), tt.state)
			require.NoError(t, err)
			if tt.check != nil {
				tt.check(t, tt.state)
			}
		})
	}
}

func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
	bal := params.BeaconConfig().EjectionBalance - 1
	ffe := params.BeaconConfig().FarFutureEpoch
	genValidators := func(num uint64) []*eth.Validator {
		vals := make([]*eth.Validator, num)
		for i := range vals {
			vals[i] = &eth.Validator{
				EffectiveBalance: bal,
				ExitEpoch:        ffe,
			}
		}
		return vals
	}

	st, err := util.NewBeaconStateElectra()
	require.NoError(b, err)

	for i := 0; i < b.N; i++ {
		b.StopTimer()
		if err := st.SetValidators(genValidators(100000)); err != nil {
			panic(err)
		}
		b.StartTimer()

		if err := electra.ProcessRegistryUpdates(context.TODO(), st); err != nil {
			panic(err)
		}
	}
}
@@ -2,12 +2,15 @@ package electra

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
	e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"go.opencensus.io/trace"
)
@@ -74,8 +77,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
		return errors.Wrap(err, "could not process rewards and penalties")
	}

	state, err = ProcessRegistryUpdates(ctx, state)
	if err != nil {
	if err := ProcessRegistryUpdates(ctx, state); err != nil {
		return errors.Wrap(err, "could not process registry updates")
	}
@@ -127,3 +129,33 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {

	return nil
}

// VerifyBlockDepositLength
//
// Spec definition:
//
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
//     assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
//     assert len(body.deposits) == 0
func VerifyBlockDepositLength(body interfaces.ReadOnlyBeaconBlockBody, state state.BeaconState) error {
	eth1Data := state.Eth1Data()
	requestsStartIndex, err := state.DepositRequestsStartIndex()
	if err != nil {
		return errors.Wrap(err, "failed to get requests start index")
	}
	eth1DepositIndexLimit := min(eth1Data.DepositCount, requestsStartIndex)
	if state.Eth1DepositIndex() < eth1DepositIndexLimit {
		if uint64(len(body.Deposits())) != min(params.BeaconConfig().MaxDeposits, eth1DepositIndexLimit-state.Eth1DepositIndex()) {
			return fmt.Errorf("incorrect outstanding deposits in block body, wanted: %d, got: %d", min(params.BeaconConfig().MaxDeposits, eth1DepositIndexLimit-state.Eth1DepositIndex()), len(body.Deposits()))
		}
	} else {
		if len(body.Deposits()) != 0 {
			return fmt.Errorf("incorrect outstanding deposits in block body, wanted: %d, got: %d", 0, len(body.Deposits()))
		}
	}
	return nil
}
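For reference, a minimal standalone sketch of the deposit-length rule that VerifyBlockDepositLength enforces, using purely hypothetical numbers (not part of this diff; relies on the Go 1.21 built-in min):

package main

import "fmt"

// expectedDeposits mirrors the rule above: once EIP-6110 deposit requests take over
// at deposit_requests_start_index, blocks must stop carrying legacy eth1 deposits.
func expectedDeposits(eth1DepositCount, depositRequestsStartIndex, eth1DepositIndex, maxDeposits uint64) uint64 {
	limit := min(eth1DepositCount, depositRequestsStartIndex)
	if eth1DepositIndex < limit {
		return min(maxDeposits, limit-eth1DepositIndex)
	}
	return 0
}

func main() {
	// With 20 eth1 deposits known, requests starting at index 15, 12 deposits already
	// processed, and MAX_DEPOSITS = 16, a valid block body carries exactly 3 deposits.
	fmt.Println(expectedDeposits(20, 15, 12, 16)) // 3
	// Once the deposit index reaches the limit, a valid block carries none.
	fmt.Println(expectedDeposits(20, 15, 15, 16)) // 0
}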
beacon-chain/core/electra/transition_no_verify_sig.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package electra

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

var (
	ProcessBLSToExecutionChanges = blocks.ProcessBLSToExecutionChanges
	ProcessVoluntaryExits        = blocks.ProcessVoluntaryExits
	ProcessAttesterSlashings     = blocks.ProcessAttesterSlashings
	ProcessProposerSlashings     = blocks.ProcessProposerSlashings
)

// ProcessOperations
//
// Spec definition:
//
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
//     # [Modified in Electra:EIP6110]
//     # Disable former deposit mechanism once all prior deposits are processed
//     eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
//     if state.eth1_deposit_index < eth1_deposit_index_limit:
//         assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
//     else:
//         assert len(body.deposits) == 0
//
//     def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
//         for operation in operations:
//             fn(state, operation)
//
//     for_ops(body.proposer_slashings, process_proposer_slashing)
//     for_ops(body.attester_slashings, process_attester_slashing)
//     for_ops(body.attestations, process_attestation)  # [Modified in Electra:EIP7549]
//     for_ops(body.deposits, process_deposit)  # [Modified in Electra:EIP7251]
//     for_ops(body.voluntary_exits, process_voluntary_exit)  # [Modified in Electra:EIP7251]
//     for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
//     for_ops(body.execution_payload.deposit_requests, process_deposit_request)  # [New in Electra:EIP6110]
//     # [New in Electra:EIP7002:EIP7251]
//     for_ops(body.execution_payload.withdrawal_requests, process_withdrawal_request)
//     # [New in Electra:EIP7251]
//     for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
func ProcessOperations(
	ctx context.Context,
	st state.BeaconState,
	block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	// 6110 validations are in VerifyOperationLengths
	bb := block.Body()
	// Electra extends the altair operations.
	st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair proposer slashing")
	}
	st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair attester slashing")
	}
	st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair attestation")
	}
	if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
		return nil, errors.Wrap(err, "could not process altair deposit")
	}
	st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
	if err != nil {
		return nil, errors.Wrap(err, "could not process voluntary exits")
	}
	st, err = ProcessBLSToExecutionChanges(st, block)
	if err != nil {
		return nil, errors.Wrap(err, "could not process bls-to-execution changes")
	}
	// new in electra
	e, err := bb.Execution()
	if err != nil {
		return nil, errors.Wrap(err, "could not get execution data from block")
	}
	exe, ok := e.(interfaces.ExecutionDataElectra)
	if !ok {
		return nil, errors.New("could not cast execution data to electra execution data")
	}
	st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
	if err != nil {
		return nil, errors.Wrap(err, "could not process deposit receipts")
	}
	st, err = ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
	if err != nil {
		return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
	}
	if err := ProcessConsolidationRequests(ctx, st, exe.ConsolidationRequests()); err != nil {
		return nil, fmt.Errorf("could not process consolidation requests: %w", err)
	}
	return st, nil
}
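The spec's for_ops helper quoted above is simply "apply fn to each operation in order". A minimal generic sketch of that pattern (illustrative only, not part of this diff; the beaconState type here is a stand-in, not Prysm's):

package main

import "fmt"

type beaconState struct{ processed int }

// forOps applies fn to every operation in order, stopping at the first error,
// mirroring the spec's for_ops(operations, fn) loop.
func forOps[T any](st *beaconState, operations []T, fn func(*beaconState, T) error) error {
	for _, op := range operations {
		if err := fn(st, op); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	st := &beaconState{}
	deposits := []string{"d1", "d2", "d3"}
	_ = forOps(st, deposits, func(s *beaconState, _ string) error {
		s.processed++
		return nil
	})
	fmt.Println(st.processed) // 3
}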
beacon-chain/core/electra/transition_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package electra_test

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestVerifyOperationLengths_Electra(t *testing.T) {
	t.Run("happy path", func(t *testing.T) {
		s, _ := util.DeterministicGenesisStateElectra(t, 1)
		sb, err := consensusblocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
		require.NoError(t, err)
		require.NoError(t, electra.VerifyBlockDepositLength(sb.Block().Body(), s))
	})
	t.Run("eth1depositIndex less than eth1depositIndexLimit & number of deposits incorrect", func(t *testing.T) {
		s, _ := util.DeterministicGenesisStateElectra(t, 1)
		sb, err := consensusblocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
		require.NoError(t, err)
		require.NoError(t, s.SetEth1DepositIndex(0))
		require.NoError(t, s.SetDepositRequestsStartIndex(1))
		err = electra.VerifyBlockDepositLength(sb.Block().Body(), s)
		require.ErrorContains(t, "incorrect outstanding deposits in block body", err)
	})
	t.Run("eth1depositIndex more than eth1depositIndexLimit & number of deposits is not 0", func(t *testing.T) {
		s, _ := util.DeterministicGenesisStateElectra(t, 1)
		sb, err := consensusblocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
		require.NoError(t, err)
		sb.SetDeposits([]*ethpb.Deposit{
			{
				Data: &ethpb.Deposit_Data{
					PublicKey:             []byte{1, 2, 3},
					Amount:                1000,
					WithdrawalCredentials: make([]byte, common.AddressLength),
					Signature:             []byte{4, 5, 6},
				},
			},
		})
		require.NoError(t, s.SetEth1DepositIndex(1))
		require.NoError(t, s.SetDepositRequestsStartIndex(1))
		err = electra.VerifyBlockDepositLength(sb.Block().Body(), s)
		require.ErrorContains(t, "incorrect outstanding deposits in block body", err)
	})
}

func TestProcessEpoch_CanProcessElectra(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
	require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
	require.NoError(t, st.SetDepositBalanceToConsume(100))
	amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
	deps := make([]*ethpb.PendingBalanceDeposit, 20)
	for i := 0; i < len(deps); i += 1 {
		deps[i] = &ethpb.PendingBalanceDeposit{
			Amount: uint64(amountAvailForProcessing) / 10,
			Index:  primitives.ValidatorIndex(i),
		}
	}
	require.NoError(t, st.SetPendingBalanceDeposits(deps))
	require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{
		{
			SourceIndex: 2,
			TargetIndex: 3,
		},
		{
			SourceIndex: 0,
			TargetIndex: 1,
		},
	}))
	err := electra.ProcessEpoch(context.Background(), st)
	require.NoError(t, err)
	require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")

	b := st.Balances()
	require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
	require.Equal(t, uint64(44799839993), b[0])

	s, err := st.InactivityScores()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(s)))

	p, err := st.PreviousEpochParticipation()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))

	p, err = st.CurrentEpochParticipation()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))

	sc, err := st.CurrentSyncCommittee()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))

	sc, err = st.NextSyncCommittee()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))

	res, err := st.DepositBalanceToConsume()
	require.NoError(t, err)
	require.Equal(t, primitives.Gwei(100), res)

	// Half of the balance deposits should have been processed.
	remaining, err := st.PendingBalanceDeposits()
	require.NoError(t, err)
	require.Equal(t, 10, len(remaining))

	num, err := st.NumPendingConsolidations()
	require.NoError(t, err)
	require.Equal(t, uint64(2), num)
}
@@ -40,6 +40,7 @@ import (
//     excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
//     deposit_requests_root=Root(),  # [New in Electra:EIP6110]
//     withdrawal_requests_root=Root(),  # [New in Electra:EIP7002],
//     consolidation_requests_root=Root(),  # [New in Electra:EIP7251]
// )
//
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
@@ -7,8 +7,74 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// AddValidatorToRegistry updates the beacon state with validator information
//
// def add_validator_to_registry(state: BeaconState,
//                               pubkey: BLSPubkey,
//                               withdrawal_credentials: Bytes32,
//                               amount: uint64) -> None:
//     index = get_index_for_new_validator(state)
//     validator = get_validator_from_deposit(pubkey, withdrawal_credentials)
//     set_or_append_list(state.validators, index, validator)
//     set_or_append_list(state.balances, index, 0)  # [Modified in Electra:EIP7251]
//     set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
//     set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
//     set_or_append_list(state.inactivity_scores, index, uint64(0))
//     state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount))  # [New in Electra:EIP7251]
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
	val := ValidatorFromDeposit(pubKey, withdrawalCredentials)
	if err := beaconState.AppendValidator(val); err != nil {
		return err
	}
	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
	if !ok {
		return errors.New("could not find validator in registry")
	}
	if err := beaconState.AppendBalance(0); err != nil {
		return err
	}
	if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
		return err
	}
	if err := beaconState.AppendInactivityScore(0); err != nil {
		return err
	}
	if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
		return err
	}
	return beaconState.AppendCurrentParticipationBits(0)
}

// ValidatorFromDeposit gets a new validator object with provided parameters
//
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32) -> Validator:
//     return Validator(
//         pubkey=pubkey,
//         withdrawal_credentials=withdrawal_credentials,
//         activation_eligibility_epoch=FAR_FUTURE_EPOCH,
//         activation_epoch=FAR_FUTURE_EPOCH,
//         exit_epoch=FAR_FUTURE_EPOCH,
//         withdrawable_epoch=FAR_FUTURE_EPOCH,
//         effective_balance=0,  # [Modified in Electra:EIP7251]
//     )
func ValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte) *ethpb.Validator {
	return &ethpb.Validator{
		PublicKey:                  pubKey,
		WithdrawalCredentials:      withdrawalCredentials,
		ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
		ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
		ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
		WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
		EffectiveBalance:           0, // [Modified in Electra:EIP7251]
	}
}

// SwitchToCompoundingValidator
//
// Spec definition:
@@ -36,7 +102,7 @@ func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorI
	return nil
}

// QueueExcessActiveBalance
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
//
// Spec definition:
//
@@ -6,6 +6,7 @@ import (

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -13,6 +14,20 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestAddValidatorToRegistry(t *testing.T) {
	st, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{})
	require.NoError(t, err)
	require.NoError(t, electra.AddValidatorToRegistry(st, make([]byte, fieldparams.BLSPubkeyLength), make([]byte, fieldparams.RootLength), 100))
	balances := st.Balances()
	require.Equal(t, 1, len(balances))
	require.Equal(t, uint64(0), balances[0])
	pbds, err := st.PendingBalanceDeposits()
	require.NoError(t, err)
	require.Equal(t, 1, len(pbds))
	require.Equal(t, uint64(100), pbds[0].Amount)
	require.Equal(t, primitives.ValidatorIndex(0), pbds[0].Index)
}

func TestSwitchToCompoundingValidator(t *testing.T) {
	s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
		Validators: []*eth.Validator{
@@ -26,66 +26,67 @@ import (
|
||||
//
|
||||
// def process_withdrawal_request(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// withdrawal_request: WithdrawalRequest
|
||||
// state: BeaconState,
|
||||
// withdrawal_request: WithdrawalRequest
|
||||
//
|
||||
// ) -> None:
|
||||
// amount = execution_layer_withdrawal_request.amount
|
||||
// is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
|
||||
// ) -> None:
|
||||
//
|
||||
// # If partial withdrawal queue is full, only full exits are processed
|
||||
// if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
|
||||
// return
|
||||
// amount = withdrawal_request.amount
|
||||
// is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
|
||||
//
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// # Verify pubkey exists
|
||||
// request_pubkey = execution_layer_withdrawal_request.validator_pubkey
|
||||
// if request_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
|
||||
// validator = state.validators[index]
|
||||
// # If partial withdrawal queue is full, only full exits are processed
|
||||
// if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
|
||||
// return
|
||||
//
|
||||
// # Verify withdrawal credentials
|
||||
// has_correct_credential = has_execution_withdrawal_credential(validator)
|
||||
// is_correct_source_address = (
|
||||
// validator.withdrawal_credentials[12:] == execution_layer_withdrawal_request.source_address
|
||||
// )
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
// # Verify the validator is active
|
||||
// if not is_active_validator(validator, get_current_epoch(state)):
|
||||
// return
|
||||
// # Verify exit has not been initiated
|
||||
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
// # Verify the validator has been active long enough
|
||||
// if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
|
||||
// return
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// # Verify pubkey exists
|
||||
// request_pubkey = withdrawal_request.validator_pubkey
|
||||
// if request_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
|
||||
// validator = state.validators[index]
|
||||
//
|
||||
// pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index)
|
||||
// # Verify withdrawal credentials
|
||||
// has_correct_credential = has_execution_withdrawal_credential(validator)
|
||||
// is_correct_source_address = (
|
||||
// validator.withdrawal_credentials[12:] == withdrawal_request.source_address
|
||||
// )
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
// # Verify the validator is active
|
||||
// if not is_active_validator(validator, get_current_epoch(state)):
|
||||
// return
|
||||
// # Verify exit has not been initiated
|
||||
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
// # Verify the validator has been active long enough
|
||||
// if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
|
||||
// return
|
||||
//
|
||||
// if is_full_exit_request:
|
||||
// # Only exit validator if it has no pending withdrawals in the queue
|
||||
// if pending_balance_to_withdraw == 0:
|
||||
// initiate_validator_exit(state, index)
|
||||
// return
|
||||
// pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index)
|
||||
//
|
||||
// has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
|
||||
// has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
|
||||
// if is_full_exit_request:
|
||||
// # Only exit validator if it has no pending withdrawals in the queue
|
||||
// if pending_balance_to_withdraw == 0:
|
||||
// initiate_validator_exit(state, index)
|
||||
// return
|
||||
//
|
||||
// # Only allow partial withdrawals with compounding withdrawal credentials
|
||||
// if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
// amount
|
||||
// )
|
||||
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
|
||||
// withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
|
||||
// state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
|
||||
// index=index,
|
||||
// amount=to_withdraw,
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
// has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
|
||||
// has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
|
||||
//
|
||||
// # Only allow partial withdrawals with compounding withdrawal credentials
|
||||
// if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
// amount
|
||||
// )
|
||||
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
|
||||
// withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
|
||||
// state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
|
||||
// index=index,
|
||||
// amount=to_withdraw,
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
|
||||
defer span.End()
|
||||
|
||||
@@ -81,7 +81,6 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
}))
|
||||
_, err = wantPostSt.ExitEpochAndUpdateChurn(primitives.Gwei(v.EffectiveBalance))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
webc, err := wantPostSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
gebc, err := got.ExitBalanceToConsume()
|
||||
@@ -110,10 +109,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
prefix[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
bal, err := preSt.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
bal += 200
|
||||
require.NoError(t, preSt.SetBalances([]uint64{bal}))
|
||||
require.NoError(t, preSt.SetBalances([]uint64{params.BeaconConfig().MinActivationBalance + 200}))
|
||||
require.NoError(t, preSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 100,
|
||||
@@ -168,7 +164,6 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
require.Equal(t, wece, gece)
|
||||
_, err = wantPostSt.ExitEpochAndUpdateChurn(primitives.Gwei(100))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
webc, err := wantPostSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
gebc, err := got.ExitBalanceToConsume()
|
||||
|
||||
@@ -22,7 +22,6 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
@@ -53,7 +52,6 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_google_go_cmp//cmp:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -20,32 +20,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
// AttestingBalance returns the total balance from all the attesting indices.
|
||||
//
|
||||
// WARNING: This method allocates a new copy of the attesting validator indices set and is
|
||||
// considered to be very memory expensive. Avoid using this unless you really
|
||||
// need to get attesting balance from attestations.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
|
||||
// """
|
||||
// Return the combined effective balance of the set of unslashed validators participating in ``attestations``.
|
||||
// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
|
||||
// """
|
||||
// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
|
||||
func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) (uint64, error) {
|
||||
indices, err := UnslashedAttestingIndices(ctx, state, atts)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attesting indices")
|
||||
}
|
||||
return helpers.TotalBalance(state, indices), nil
|
||||
}
|
||||
|
||||
// ProcessRegistryUpdates rotates validators in and out of active pool.
// The amount to rotate is determined by the churn limit.
//
@@ -96,7 +73,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
	}

	// Collect validators eligible for activation and not yet dequeued for activation.
	if helpers.IsEligibleForActivationUsingTrie(st, val) {
	if helpers.IsEligibleForActivationUsingROVal(st, val) {
		eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
	}
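Both eligibility helpers referenced above reduce to the spec's is_eligible_for_activation predicate. A standalone sketch with hypothetical epoch values (not part of this diff):

package main

import "fmt"

const farFutureEpoch = ^uint64(0) // FAR_FUTURE_EPOCH

// isEligibleForActivation: the eligibility epoch must already be finalized and the
// validator must not yet have been assigned an activation epoch.
func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finalizedEpoch uint64) bool {
	return activationEligibilityEpoch <= finalizedEpoch && activationEpoch == farFutureEpoch
}

func main() {
	fmt.Println(isEligibleForActivation(4, farFutureEpoch, 5)) // true: eligibility epoch 4 is finalized
	fmt.Println(isEligibleForActivation(6, farFutureEpoch, 5)) // false: epoch 6 not yet finalized
	fmt.Println(isEligibleForActivation(4, 9, 5))              // false: already dequeued for activation
}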
@@ -455,51 +432,3 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// UnslashedAttestingIndices returns all the attesting indices from a list of attestations,
|
||||
// it sorts the indices and filters out the slashed ones.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_unslashed_attesting_indices(state: BeaconState,
|
||||
// attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
|
||||
// output = set() # type: Set[ValidatorIndex]
|
||||
// for a in attestations:
|
||||
// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
|
||||
// return set(filter(lambda index: not state.validators[index].slashed, output))
|
||||
func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) ([]primitives.ValidatorIndex, error) {
|
||||
var setIndices []primitives.ValidatorIndex
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, att := range atts {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a set for attesting indices
|
||||
for _, index := range attestingIndices {
|
||||
if !seen[index] {
|
||||
setIndices = append(setIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
seen[index] = true
|
||||
}
|
||||
}
|
||||
// Sort the attesting set indices by increasing order.
|
||||
sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
|
||||
// Remove the slashed validator indices.
|
||||
for i := 0; i < len(setIndices); i++ {
|
||||
v, err := state.ValidatorAtIndexReadOnly(setIndices[i])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to look up validator")
|
||||
}
|
||||
if !v.IsNil() && v.Slashed() {
|
||||
setIndices = append(setIndices[:i], setIndices[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
return setIndices, nil
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
@@ -24,131 +23,6 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestUnslashedAttestingIndices_CanSortAndFilter(t *testing.T) {
|
||||
// Generate 2 attestations.
|
||||
atts := make([]*ethpb.PendingAttestation, 2)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate validators and state for the 2 attestations.
|
||||
validatorCount := 1000
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(indices)-1; i++ {
|
||||
if indices[i] >= indices[i+1] {
|
||||
t.Error("sorted indices not sorted or duplicated")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the slashed validator is filtered.
|
||||
slashedValidator := indices[0]
|
||||
validators = beaconState.Validators()
|
||||
validators[slashedValidator].Slashed = true
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
indices, err = epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(indices); i++ {
|
||||
assert.NotEqual(t, slashedValidator, indices[i], "Slashed validator %d is not filtered", slashedValidator)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnslashedAttestingIndices_DuplicatedAttestations(t *testing.T) {
|
||||
// Generate 5 of the same attestations.
|
||||
atts := make([]*ethpb.PendingAttestation, 5)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0}},
|
||||
AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate validators and state for the 5 attestations.
|
||||
validatorCount := 1000
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(indices)-1; i++ {
|
||||
if indices[i] >= indices[i+1] {
|
||||
t.Error("sorted indices not sorted or duplicated")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestingBalance_CorrectBalance(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
// Generate 2 attestations.
|
||||
atts := make([]*ethpb.PendingAttestation, 2)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Slot: primitives.Slot(i),
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate validators with balances and state for the 2 attestations.
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 2,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
balance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
wanted := 256 * params.BeaconConfig().MaxEffectiveBalance
|
||||
assert.Equal(t, wanted, balance)
|
||||
}
|
||||
|
||||
func TestProcessSlashings_NotSlashed(t *testing.T) {
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 0,
|
||||
|
||||
@@ -47,7 +47,6 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
@@ -59,90 +58,6 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
assert.Equal(t, wanted, beaconState.Balances()[0], "Unexpected balance")
|
||||
}
|
||||
|
||||
func TestAttestationDeltaPrecompute(t *testing.T) {
|
||||
e := params.BeaconConfig().SlotsPerEpoch
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Root: emptyRoot[:],
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: emptyRoot[:],
|
||||
},
|
||||
BeaconBlockRoot: emptyRoot[:],
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
InclusionDelay: 1,
|
||||
}
|
||||
}
|
||||
base.PreviousEpochAttestations = atts
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
slashedAttestedIndices := []primitives.ValidatorIndex{1413}
|
||||
for _, i := range slashedAttestedIndices {
|
||||
vs := beaconState.Validators()
|
||||
vs[i].Slashed = true
|
||||
require.Equal(t, nil, beaconState.SetValidators(vs))
|
||||
}
|
||||
|
||||
vp, bp, err := New(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add some variances to target and head balances.
|
||||
// See: https://github.com/prysmaticlabs/prysm/issues/5593
|
||||
bp.PrevEpochTargetAttested /= 2
|
||||
bp.PrevEpochHeadAttested = bp.PrevEpochHeadAttested * 2 / 3
|
||||
rewards, penalties, err := AttestationsDelta(beaconState, bp, vp)
|
||||
require.NoError(t, err)
|
||||
attestedBalance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
totalBalance, err := helpers.TotalActiveBalance(beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
attestedIndices := []primitives.ValidatorIndex{55, 1339, 1746, 1811, 1569}
|
||||
for _, i := range attestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
require.NoError(t, err, "Could not get base reward")
|
||||
|
||||
// Base rewards for getting source right
|
||||
wanted := attestedBalance*base/totalBalance +
|
||||
bp.PrevEpochTargetAttested*base/totalBalance +
|
||||
bp.PrevEpochHeadAttested*base/totalBalance
|
||||
// Base rewards for proposer and attesters working together getting attestation
|
||||
// on chain in the fatest manner
|
||||
proposerReward := base / params.BeaconConfig().ProposerRewardQuotient
|
||||
wanted += (base-proposerReward)*uint64(params.BeaconConfig().MinAttestationInclusionDelay) - 1
|
||||
assert.Equal(t, wanted, rewards[i], "Unexpected reward balance for validator with index %d", i)
|
||||
// Since all these validators attested, they shouldn't get penalized.
|
||||
assert.Equal(t, uint64(0), penalties[i], "Unexpected penalty balance")
|
||||
}
|
||||
|
||||
for _, i := range slashedAttestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
assert.NoError(t, err, "Could not get base reward")
|
||||
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
|
||||
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
|
||||
}
|
||||
|
||||
nonAttestedIndices := []primitives.ValidatorIndex{434, 677, 872, 791}
|
||||
for _, i := range nonAttestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
assert.NoError(t, err, "Could not get base reward")
|
||||
wanted := 3 * base
|
||||
// Since all these validators did not attest, they shouldn't get rewarded.
|
||||
assert.Equal(t, uint64(0), rewards[i], "Unexpected reward balance")
|
||||
// Base penalties for not attesting.
|
||||
assert.Equal(t, wanted, penalties[i], "Unexpected penalty balance")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
|
||||
e := params.BeaconConfig().SlotsPerEpoch
|
||||
validatorCount := uint64(2048)
|
||||
|
||||
@@ -24,7 +24,7 @@ type Validator struct {
|
||||
IsPrevEpochSourceAttester bool
|
||||
// IsPrevEpochTargetAttester is true if the validator attested previous epoch target.
|
||||
IsPrevEpochTargetAttester bool
|
||||
// IsHeadAttester is true if the validator attested head.
|
||||
// IsPrevEpochHeadAttester is true if the validator attested the previous epoch head.
|
||||
IsPrevEpochHeadAttester bool
|
||||
|
||||
// CurrentEpochEffectiveBalance is how much effective balance this validator has current epoch.
|
||||
|
||||
@@ -32,6 +32,9 @@ const (
|
||||
|
||||
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
|
||||
AttesterSlashingReceived = 8
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 9
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing ethpb.AttSlashing
|
||||
}
|
||||
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
@@ -79,6 +79,7 @@ go_test(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -82,6 +82,47 @@ func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, at
|
||||
return committees, nil
|
||||
}
|
||||
|
||||
// BeaconCommittees returns the list of all beacon committees for a given state at a given slot.
|
||||
func BeaconCommittees(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) ([][]primitives.ValidatorIndex, error) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
activeCount, err := ActiveValidatorCount(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute active validator count")
|
||||
}
|
||||
committeesPerSlot := SlotCommitteeCount(activeCount)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get seed")
|
||||
}
|
||||
|
||||
committees := make([][]primitives.ValidatorIndex, committeesPerSlot)
|
||||
var activeIndices []primitives.ValidatorIndex
|
||||
|
||||
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
|
||||
committee, err := committeeCache.Committee(ctx, slot, seed, idx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not interface with committee cache")
|
||||
}
|
||||
if committee != nil {
|
||||
committees[idx] = committee
|
||||
continue
|
||||
}
|
||||
|
||||
if len(activeIndices) == 0 {
|
||||
activeIndices, err = ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
}
|
||||
committee, err = BeaconCommittee(ctx, activeIndices, seed, slot, idx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute beacon committee")
|
||||
}
|
||||
committees[idx] = committee
|
||||
}
|
||||
return committees, nil
|
||||
}
|
||||
|
||||
// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
|
||||
// is a spec implementation where state is used as an argument. In case of state retrieval
|
||||
// becomes expensive, consider using BeaconCommittee below.
|
||||
@@ -253,36 +294,22 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Retrieve active validator count for the specified epoch.
|
||||
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the number of committees per slot based on the number of active validator indices.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
|
||||
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
|
||||
vals := make(map[primitives.ValidatorIndex]struct{})
|
||||
for _, v := range validators {
|
||||
vals[v] = struct{}{}
|
||||
}
|
||||
|
||||
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
|
||||
// Compute committee assignments for each slot in the epoch.
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Compute committees for the current slot.
|
||||
for j := uint64(0); j < numCommitteesPerSlot; j++ {
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
committees, err := BeaconCommittees(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute beacon committees")
|
||||
}
|
||||
for j, committee := range committees {
|
||||
for _, vIndex := range committee {
|
||||
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
|
||||
continue
|
||||
@@ -296,7 +323,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return assignments, nil
|
||||
}
|
||||
|
||||
@@ -311,24 +337,6 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyAttestationBitfieldLengths verifies that an attestations aggregation bitfields is
|
||||
// a valid length matching the size of the committee.
|
||||
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att ethpb.Att) error {
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve beacon committees")
|
||||
}
|
||||
|
||||
if committee == nil {
|
||||
return errors.New("no committee exist for this attestation")
|
||||
}
|
||||
|
||||
if err := VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
return errors.Wrap(err, "failed to verify aggregation bitfield")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShuffledIndices uses input beacon state and returns the shuffled indices of the input epoch,
|
||||
// the shuffled indices then can be used to break up into committees.
|
||||
func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -402,7 +403,12 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(tt.stateSlot))
|
||||
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
|
||||
att := tt.attestation
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, committee)
|
||||
err = helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee)))
|
||||
if tt.verificationFailure {
|
||||
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
|
||||
} else {
|
||||
@@ -749,3 +755,27 @@ func TestAttestationCommittees(t *testing.T) {
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconCommittees(t *testing.T) {
|
||||
prevConfig := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(prevConfig)
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.MinGenesisActiveValidatorCount = 128
|
||||
c.SlotsPerEpoch = 4
|
||||
c.TargetCommitteeSize = 16
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 256)
|
||||
|
||||
activeCount, err := helpers.ActiveValidatorCount(context.Background(), state, 0)
|
||||
require.NoError(t, err)
|
||||
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
|
||||
committees, err := helpers.BeaconCommittees(context.Background(), state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, committeesPerSlot, uint64(len(committees)))
|
||||
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, 0, idx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, committees[idx], committee)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
|
||||
return false, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -68,7 +68,7 @@ func IsNextPeriodSyncCommittee(
|
||||
return false, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -95,7 +95,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
return nil, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -129,7 +129,7 @@ func NextPeriodSyncSubcommitteeIndices(
|
||||
return nil, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -357,10 +358,10 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
|
||||
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
|
||||
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
// effective_balance = state.validators[candidate_index].effective_balance
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte: #[Modified in Electra:EIP7251]
|
||||
// return candidate_index
|
||||
// i += 1
|
||||
func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
|
||||
func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
|
||||
length := uint64(len(activeIndices))
|
||||
if length == 0 {
|
||||
return 0, errors.New("empty active indices list")
|
||||
@@ -385,7 +386,12 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primi
|
||||
}
|
||||
effectiveBal := v.EffectiveBalance()
|
||||
|
||||
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
|
||||
maxEB := params.BeaconConfig().MaxEffectiveBalance
|
||||
if bState.Version() >= version.Electra {
|
||||
maxEB = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
|
||||
return candidateIndex, nil
|
||||
}
|
||||
}
|
||||
@@ -464,13 +470,9 @@ func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator *ethpb.Va
|
||||
return isEligibleForActivation(validator.ActivationEligibilityEpoch, validator.ActivationEpoch, finalizedEpoch)
|
||||
}
|
||||
|
||||
// IsEligibleForActivationUsingTrie checks if the validator is eligible for activation.
|
||||
func IsEligibleForActivationUsingTrie(state state.ReadOnlyCheckpoint, validator state.ReadOnlyValidator) bool {
|
||||
cpt := state.FinalizedCheckpoint()
|
||||
if cpt == nil {
|
||||
return false
|
||||
}
|
||||
return isEligibleForActivation(validator.ActivationEligibilityEpoch(), validator.ActivationEpoch(), cpt.Epoch)
|
||||
// IsEligibleForActivationUsingROVal checks if the validator is eligible for activation using the provided read only validator.
|
||||
func IsEligibleForActivationUsingROVal(state state.ReadOnlyCheckpoint, validator state.ReadOnlyValidator) bool {
|
||||
return isEligibleForActivation(validator.ActivationEligibilityEpoch(), validator.ActivationEpoch(), state.FinalizedCheckpointEpoch())
|
||||
}
|
||||
|
||||
// isEligibleForActivation carries out the logic for IsEligibleForActivation*
|
||||
@@ -525,16 +527,16 @@ func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return isCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
|
||||
return IsCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
// isCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
|
||||
// IsCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
|
||||
// return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
|
||||
func isCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
func IsCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
@@ -548,7 +550,7 @@ func isCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
// Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
|
||||
// """
|
||||
// return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
|
||||
func HasExecutionWithdrawalCredentials(v *ethpb.Validator) bool {
|
||||
func HasExecutionWithdrawalCredentials(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
@@ -582,13 +584,13 @@ func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
|
||||
// and validator.withdrawable_epoch <= epoch
|
||||
// and balance > 0
|
||||
// )
|
||||
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil || balance <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
if fork >= version.Electra {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch <= epoch
|
||||
}
|
||||
|
||||
@@ -598,12 +600,12 @@ func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch pr
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
// partial withdrawal. This function assumes that the caller has a lock on the state.
|
||||
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if epoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
if fork < version.Electra {
|
||||
return isPartiallyWithdrawableValidatorCapella(val, balance, epoch)
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -597,10 +598,11 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
seed [32]byte
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want primitives.ValidatorIndex
|
||||
wantedErr string
|
||||
name string
|
||||
isElectraOrAbove bool
|
||||
args args
|
||||
want primitives.ValidatorIndex
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "all_active_indices",
|
||||
@@ -682,6 +684,54 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
},
|
||||
want: 7,
|
||||
},
|
||||
{
|
||||
name: "electra_probability_changes",
|
||||
isElectraOrAbove: true,
|
||||
args: args{
|
||||
validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{3},
|
||||
seed: seed,
|
||||
},
|
||||
want: 3,
|
||||
},
|
||||
{
|
||||
name: "electra_probability_changes_all_active",
|
||||
isElectraOrAbove: true,
|
||||
args: args{
|
||||
validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}, // skip this one
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{0, 1, 2, 3, 4},
|
||||
seed: seed,
|
||||
},
|
||||
want: 4,
|
||||
},
|
||||
{
|
||||
name: "electra_probability_returns_first_validator_with_criteria",
|
||||
isElectraOrAbove: true,
|
||||
args: args{
|
||||
validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 1},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{EffectiveBalance: 1},
|
||||
{EffectiveBalance: 1},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{1},
|
||||
seed: seed,
|
||||
},
|
||||
want: 1,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -690,6 +740,11 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
bState := ðpb.BeaconState{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
|
||||
require.NoError(t, err)
|
||||
if tt.isElectraOrAbove {
|
||||
stTrie, err = state_native.InitializeFromProtoUnsafeElectra(ðpb.BeaconStateElectra{Validators: tt.args.validators})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
@@ -916,6 +971,7 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
validator *ethpb.Validator
|
||||
balance uint64
|
||||
epoch primitives.Epoch
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
@@ -973,13 +1029,14 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
fork: version.Electra,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch))
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -990,6 +1047,7 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
validator *ethpb.Validator
|
||||
balance uint64
|
||||
epoch primitives.Epoch
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
@@ -1037,6 +1095,7 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
},
|
||||
balance: params.BeaconConfig().MinActivationBalance * 2,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
fork: version.Electra,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
@@ -1047,13 +1106,14 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
fork: version.Electra,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch))
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
48
beacon-chain/core/peerdas/BUILD.bazel
Normal file
48
beacon-chain/core/peerdas/BUILD.bazel
Normal file
@@ -0,0 +1,48 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"helpers.go",
|
||||
"log.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
587
beacon-chain/core/peerdas/helpers.go
Normal file
587
beacon-chain/core/peerdas/helpers.go
Normal file
@@ -0,0 +1,587 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
errors "github.com/pkg/errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodySubnetCountEnrKey = "csc"
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
|
||||
type Csc uint8
|
||||
|
||||
func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
errRecordNil = errors.New("record is nil")
|
||||
errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
// CustodyColumnSubnets computes the subnets the node should participate in for custody.
|
||||
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Check if the custody subnet count is larger than the data column sidecar subnet count.
|
||||
if custodySubnetCount > dataColumnSidecarSubnetCount {
|
||||
return nil, errCustodySubnetCountTooLarge
|
||||
}
|
||||
|
||||
// First, compute the subnet IDs that the node should participate in.
|
||||
subnetIds := make(map[uint64]bool, custodySubnetCount)
|
||||
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the subnet ID.
|
||||
subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount
|
||||
|
||||
// Add the subnet to the map.
|
||||
subnetIds[subnetId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
return subnetIds, nil
|
||||
}
|
||||
|
||||
// CustodyColumns computes the columns the node should custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
|
||||
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the custodied subnets.
|
||||
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody subnets")
|
||||
}
|
||||
|
||||
columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount
|
||||
|
||||
// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
|
||||
// Columns belonging to the same subnet are contiguous.
|
||||
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
|
||||
for i := uint64(0); i < columnsPerSubnet; i++ {
|
||||
for subnetId := range subnetIds {
|
||||
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
|
||||
columnIndices[columnIndex] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columnIndices, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cellsAndProofs = append(cellsAndProofs, blobCellsAndProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// populateAndFilterIndices returns a sorted slices of indices, setting all indices if none are provided,
|
||||
// and filtering out indices higher than the blob count.
|
||||
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
|
||||
// If no indices are provided, provide all blobs.
|
||||
if len(indices) == 0 {
|
||||
for i := range blobCount {
|
||||
indices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Filter blobs index higher than the blob count.
|
||||
filteredIndices := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
if i < blobCount {
|
||||
filteredIndices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Transform set to slice.
|
||||
indicesSlice := make([]uint64, 0, len(filteredIndices))
|
||||
for i := range filteredIndices {
|
||||
indicesSlice = append(indicesSlice, i)
|
||||
}
|
||||
|
||||
// Sort the indices.
|
||||
slices.Sort[[]uint64](indicesSlice)
|
||||
|
||||
return indicesSlice
|
||||
}
|
||||
|
||||
// Blobs extract blobs from `dataColumnsSidecar`.
|
||||
// This can be seen as the reciprocal function of DataColumnSidecars.
|
||||
// `dataColumnsSidecar` needs to contain the datacolumns corresponding to the non-extended matrix,
|
||||
// else an error will be returned.
|
||||
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
|
||||
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
|
||||
columnCount := fieldparams.NumberOfColumns
|
||||
|
||||
neededColumnCount := columnCount / 2
|
||||
|
||||
// Check if all needed columns are present.
|
||||
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
|
||||
for i := range dataColumnsSidecar {
|
||||
dataColumnSideCar := dataColumnsSidecar[i]
|
||||
columnIndex := dataColumnSideCar.ColumnIndex
|
||||
|
||||
if columnIndex < uint64(neededColumnCount) {
|
||||
sliceIndexFromColumnIndex[columnIndex] = i
|
||||
}
|
||||
}
|
||||
|
||||
actualColumnCount := len(sliceIndexFromColumnIndex)
|
||||
|
||||
// Get missing columns.
|
||||
if actualColumnCount < neededColumnCount {
|
||||
missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
|
||||
for i := range neededColumnCount {
|
||||
if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
|
||||
missingColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
missingColumnsSlice := make([]int, 0, len(missingColumns))
|
||||
for i := range missingColumns {
|
||||
missingColumnsSlice = append(missingColumnsSlice, i)
|
||||
}
|
||||
|
||||
slices.Sort[[]int](missingColumnsSlice)
|
||||
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
|
||||
firstDataColumnSidecar := dataColumnsSidecar[0]
|
||||
|
||||
blobCount := uint64(len(firstDataColumnSidecar.DataColumn))
|
||||
|
||||
// Check all colums have te same length.
|
||||
for i := range dataColumnsSidecar {
|
||||
if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
|
||||
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
|
||||
}
|
||||
}
|
||||
|
||||
// Reconstruct verified RO blobs from columns.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Populate and filter indices.
|
||||
indicesSlice := populateAndFilterIndices(indices, blobCount)
|
||||
|
||||
for _, blobIndex := range indicesSlice {
|
||||
var blob kzg.Blob
|
||||
|
||||
// Compute the content of the blob.
|
||||
for columnIndex := range neededColumnCount {
|
||||
sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
|
||||
}
|
||||
|
||||
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
|
||||
cell := dataColumnSideCar.DataColumn[blobIndex]
|
||||
|
||||
for i := 0; i < len(cell); i++ {
|
||||
blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the blob KZG commitment.
|
||||
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
|
||||
|
||||
// Compute the blob KZG proof.
|
||||
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute blob KZG proof")
|
||||
}
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: blobIndex,
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
|
||||
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO blob")
|
||||
}
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return verifiedROBlobs, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Each CellsAndProofs corresponds to a Blob
|
||||
// So we can get the BlobCount by checking the length of CellsAndProofs
|
||||
blobsCount := len(cellsAndProofs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
|
||||
// data column.
|
||||
func VerifyDataColumnSidecarKZGProofs(sc blocks.RODataColumn) (bool, error) {
|
||||
if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
var commitments []kzg.Bytes48
|
||||
var indices []uint64
|
||||
var cells []kzg.Cell
|
||||
var proofs []kzg.Bytes48
|
||||
for i := range sc.DataColumn {
|
||||
commitments = append(commitments, kzg.Bytes48(sc.KzgCommitments[i]))
|
||||
indices = append(indices, sc.ColumnIndex)
|
||||
cells = append(cells, kzg.Cell(sc.DataColumn[i]))
|
||||
proofs = append(proofs, kzg.Bytes48(sc.KzgProof[i]))
|
||||
}
|
||||
|
||||
return kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns the number of subnets the node should participate in for custody.
|
||||
func CustodySubnetCount() uint64 {
|
||||
count := params.BeaconConfig().CustodyRequirement
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
count = params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// CustodyColumnCount returns the number of columns the node should custody.
|
||||
func CustodyColumnCount() uint64 {
|
||||
// Get the number of subnets.
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the number of columns per subnet.
|
||||
columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount
|
||||
|
||||
// Get the number of subnets we custody
|
||||
custodySubnetCount := CustodySubnetCount()
|
||||
|
||||
// Finally, compute the number of columns we should custody.
|
||||
custodyColumnCount := custodySubnetCount * columnsPerSubnet
|
||||
|
||||
return custodyColumnCount
|
||||
}
|
||||
|
||||
// HypergeomCDF computes the hypergeometric cumulative distribution function.
|
||||
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
func HypergeomCDF(k, M, n, N uint64) float64 {
|
||||
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
|
||||
denominator := new(big.Float).SetInt(denominatorInt)
|
||||
|
||||
rBig := big.NewFloat(0)
|
||||
|
||||
for i := uint64(0); i < k+1; i++ {
|
||||
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
|
||||
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
|
||||
numeratorInt := new(big.Int).Mul(a, b)
|
||||
numerator := new(big.Float).SetInt(numeratorInt)
|
||||
item := new(big.Float).Quo(numerator, denominator)
|
||||
rBig.Add(rBig, item)
|
||||
}
|
||||
|
||||
r, _ := rBig.Float64()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures the
|
||||
// number of samples we should actually query from peers.
|
||||
// TODO: Add link to the specification once it is available.
|
||||
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
|
||||
// Retrieve the columns count
|
||||
columnsCount := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If half of the columns are missing, we are able to reconstruct the data.
|
||||
// If half of the columns + 1 are missing, we are not able to reconstruct the data.
|
||||
// This is the smallest worst case.
|
||||
worstCaseMissing := columnsCount/2 + 1
|
||||
|
||||
// Compute the false positive threshold.
|
||||
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
|
||||
|
||||
var sampleCount uint64
|
||||
|
||||
// Finally, compute the extended sample count.
|
||||
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
|
||||
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return sampleCount
|
||||
}
|
||||
|
||||
func CustodyCountFromRecord(record *enr.Record) (uint64, error) {
|
||||
// By default, we assume the peer custodies the minimum number of subnets.
|
||||
if record == nil {
|
||||
return 0, errRecordNil
|
||||
}
|
||||
|
||||
// Load the `custody_subnet_count`
|
||||
var csc Csc
|
||||
if err := record.Load(&csc); err != nil {
|
||||
return 0, errCannotLoadCustodySubnetCount
|
||||
}
|
||||
|
||||
return uint64(csc), nil
|
||||
}
|
||||
|
||||
func CanSelfReconstruct(numCol uint64) bool {
|
||||
total := params.BeaconConfig().NumberOfColumns
|
||||
// if total is odd, then we need total / 2 + 1 columns to reconstruct
|
||||
// if total is even, then we need total / 2 columns to reconstruct
|
||||
columnsNeeded := total/2 + total%2
|
||||
return numCol >= columnsNeeded
|
||||
}
|
||||
|
||||
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
|
||||
func RecoverCellsAndProofs(
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) ([]kzg.CellsAndProofs, error) {
|
||||
var wg errgroup.Group
|
||||
|
||||
dataColumnSideCarsCount := len(dataColumnSideCars)
|
||||
|
||||
if dataColumnSideCarsCount == 0 {
|
||||
return nil, errors.New("no data column sidecars")
|
||||
}
|
||||
|
||||
// Check if all columns have the same length.
|
||||
blobCount := len(dataColumnSideCars[0].DataColumn)
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
length := len(sidecar.DataColumn)
|
||||
|
||||
if length != blobCount {
|
||||
return nil, errors.New("columns do not have the same length")
|
||||
}
|
||||
}
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
|
||||
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
|
||||
bIndex := blobIndex
|
||||
wg.Go(func() error {
|
||||
start := time.Now()
|
||||
|
||||
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
|
||||
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)
|
||||
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
// Build the cell indices.
|
||||
cellsIndices = append(cellsIndices, sidecar.ColumnIndex)
|
||||
|
||||
// Get the cell.
|
||||
column := sidecar.DataColumn
|
||||
cell := column[bIndex]
|
||||
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
|
||||
}
|
||||
|
||||
recoveredCellsAndProofs[bIndex] = cellsAndProofs
|
||||
log.WithFields(logrus.Fields{
|
||||
"elapsed": time.Since(start),
|
||||
"index": bIndex,
|
||||
"root": fmt.Sprintf("%x", blockRoot),
|
||||
}).Debug("Recovered cells and proofs")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return recoveredCellsAndProofs, nil
|
||||
}
|
||||
544
beacon-chain/core/peerdas/helpers_test.go
Normal file
544
beacon-chain/core/peerdas/helpers_test.go
Normal file
@@ -0,0 +1,544 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
var (
|
||||
comms [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := GetRandBlob(i)
|
||||
commitment, _, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(roCol)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBlobs(t *testing.T) {
|
||||
blobsIndice := map[uint64]bool{}
|
||||
|
||||
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
|
||||
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
|
||||
almostAllColumns = append(almostAllColumns, ðpb.DataColumnSidecar{
|
||||
ColumnIndex: uint64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input []*ethpb.DataColumnSidecar
|
||||
expected []*blocks.VerifiedROBlob
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty input",
|
||||
input: []*ethpb.DataColumnSidecar{},
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
|
||||
},
|
||||
{
|
||||
name: "missing columns",
|
||||
input: almostAllColumns,
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := peerdas.Blobs(blobsIndice, tc.input)
|
||||
if tc.err != nil {
|
||||
require.Equal(t, tc.err.Error(), err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepSSZEqual(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
|
||||
const blobCount = 5
|
||||
blobsIndex := map[uint64]bool{}
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := GetRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
// Compute data columns sidecars from the signed beacon block and from the blobs.
|
||||
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute the blobs from the data columns sidecar.
|
||||
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the blobs are the same.
|
||||
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
|
||||
}
|
||||
|
||||
func TestCustodySubnetCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribeToAllSubnets=false",
|
||||
subscribeToAllSubnets: false,
|
||||
expected: params.BeaconConfig().CustodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "subscribeToAllSubnets=true",
|
||||
subscribeToAllSubnets: true,
|
||||
expected: params.BeaconConfig().DataColumnSidecarSubnetCount,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set flags.
|
||||
resetFlags := flags.Get()
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = tc.subscribeToAllSubnets
|
||||
flags.Init(gFlags)
|
||||
|
||||
// Get the custody subnet count.
|
||||
actual := peerdas.CustodySubnetCount()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyColumnCount(t *testing.T) {
|
||||
const expected uint64 = 8
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DataColumnSidecarSubnetCount = 32
|
||||
config.CustodyRequirement = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
actual := peerdas.CustodyColumnCount()
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Test case from https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
// Population size: 1000, number of successes in population: 500, sample size: 10, number of successes in sample: 5
|
||||
// Expected result: 0.072
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyCountFromRecord(t *testing.T) {
|
||||
const expected uint64 = 7
|
||||
|
||||
// Create an Ethereum record.
|
||||
record := &enr.Record{}
|
||||
record.Set(peerdas.Csc(expected))
|
||||
|
||||
actual, err := peerdas.CustodyCountFromRecord(record)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestCanSelfReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNumberOfColumns uint64
|
||||
custodyNumberOfColumns uint64
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "totalNumberOfColumns=64, custodyNumberOfColumns=31",
|
||||
totalNumberOfColumns: 64,
|
||||
custodyNumberOfColumns: 31,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=64, custodyNumberOfColumns=32",
|
||||
totalNumberOfColumns: 64,
|
||||
custodyNumberOfColumns: 32,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=65, custodyNumberOfColumns=32",
|
||||
totalNumberOfColumns: 65,
|
||||
custodyNumberOfColumns: 32,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=63, custodyNumberOfColumns=33",
|
||||
totalNumberOfColumns: 65,
|
||||
custodyNumberOfColumns: 33,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = tc.totalNumberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Check if reconstuction is possible.
|
||||
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfColumns)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconstructionRoundTrip(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
const blobCount = 5
|
||||
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs and their corresponding commitments.
|
||||
var (
|
||||
blobsKzgCommitments [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := range blobCount {
|
||||
blob := GetRandBlob(int64(i))
|
||||
commitment, _, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
// Generate a signed beacon block.
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert data columns sidecars from signed block and blobs.
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create verified RO data columns.
|
||||
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
|
||||
}
|
||||
|
||||
verifiedRoDataColumn := verifiedRoDataColumns[0]
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
var noDataColumns []*ethpb.DataColumnSidecar
|
||||
dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
|
||||
{DataColumn: [][]byte{{}, {}}},
|
||||
{DataColumn: [][]byte{{}}},
|
||||
}
|
||||
notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
|
||||
originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
|
||||
extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
|
||||
evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
allDataColumns := dataColumnSidecars
|
||||
|
||||
for i, dataColumn := range dataColumnSidecars {
|
||||
if i%2 == 0 {
|
||||
evenDataColumns = append(evenDataColumns, dataColumn)
|
||||
} else {
|
||||
oddDataColumns = append(oddDataColumns, dataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataColumnsSidecar []*ethpb.DataColumnSidecar
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "No data columns sidecars",
|
||||
dataColumnsSidecar: noDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "Data columns sidecar with different lengths",
|
||||
dataColumnsSidecar: dataColumnsWithDifferentLengths,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "All columns are present (no actual need to reconstruct)",
|
||||
dataColumnsSidecar: allDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only original columns are present",
|
||||
dataColumnsSidecar: originalDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only extended columns are present",
|
||||
dataColumnsSidecar: extendedDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only even columns are present",
|
||||
dataColumnsSidecar: evenDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only odd columns are present",
|
||||
dataColumnsSidecar: oddDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Not enough columns to reconstruct",
|
||||
dataColumnsSidecar: notEnoughDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Recover cells and proofs from available data columns sidecars.
|
||||
cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
|
||||
isError := (err != nil)
|
||||
require.Equal(t, tc.isError, isError)
|
||||
|
||||
if isError {
|
||||
return
|
||||
}
|
||||
|
||||
// Recover all data columns sidecars from cells and proofs.
|
||||
reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
|
||||
blobsKzgCommitments,
|
||||
signedBeaconBlockHeader,
|
||||
verifiedRoDataColumn.KzgCommitmentsInclusionProof,
|
||||
cellsAndProofs,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := dataColumnSidecars
|
||||
actual := reconstructedDataColumnsSideCars
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
5
beacon-chain/core/peerdas/log.go
Normal file
5
beacon-chain/core/peerdas/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package peerdas
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "peerdas")
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user